1 /*
   2  * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "gc/shared/oopStorage.inline.hpp"
  28 #include "gc/shared/oopStorageParState.inline.hpp"
  29 #include "gc/shared/weakProcessor.inline.hpp"
  30 #include "gc/shared/weakProcessorPhases.hpp"
  31 #include "gc/shared/weakProcessorPhaseTimes.hpp"
  32 #include "memory/allocation.inline.hpp"
  33 #include "memory/iterator.hpp"
  34 #include "runtime/globals.hpp"
  35 #include "utilities/macros.hpp"
  36 
  37 void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  38   FOR_EACH_WEAK_PROCESSOR_PHASE(phase) {
  39     if (WeakProcessorPhases::is_serial(phase)) {
  40       WeakProcessorPhases::processor(phase)(is_alive, keep_alive);
  41     } else {
  42       if (WeakProcessorPhases::is_stringtable(phase)) {
  43         StringTable::reset_dead_counter();
  44 
  45         CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
  46         WeakProcessorPhases::oop_storage(phase)->oops_do(&cl);
  47 
  48         StringTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
  49         StringTable::finish_dead_counter();
  50       } else {
  51         WeakProcessorPhases::oop_storage(phase)->weak_oops_do(is_alive, keep_alive);
  52       }
  53     }
  54   }
  55 }
  56 
  57 void WeakProcessor::oops_do(OopClosure* closure) {
  58   AlwaysTrueClosure always_true;
  59   weak_oops_do(&always_true, closure);
  60 }
  61 
  62 uint WeakProcessor::ergo_workers(uint max_workers) {
  63   // Ignore ParallelRefProcEnabled; that's for j.l.r.Reference processing.
  64   if (ReferencesPerThread == 0) {
  65     // Configuration says always use all the threads.
  66     return max_workers;
  67   }
  68 
  69   // One thread per ReferencesPerThread references (or fraction thereof)
  70   // in the various OopStorage objects, bounded by max_threads.
  71   //
  72   // Serial phases are ignored in this calculation, because of the
  73   // cost of running unnecessary threads.  These phases are normally
  74   // small or empty (assuming they are configured to exist at all),
  75   // and development oriented, so not allocating any threads
  76   // specifically for them is okay.
  77   size_t ref_count = 0;
  78   FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(phase) {
  79     ref_count += WeakProcessorPhases::oop_storage(phase)->allocation_count();
  80   }
  81 
  82   // +1 to (approx) round up the ref per thread division.
  83   size_t nworkers = 1 + (ref_count / ReferencesPerThread);
  84   nworkers = MIN2(nworkers, static_cast<size_t>(max_workers));
  85   return static_cast<uint>(nworkers);
  86 }
  87 
// Shared setup used by both Task constructors: records the worker count
// with the (optional) phase times and builds per-phase parallel
// iteration state for each OopStorage phase.
void WeakProcessor::Task::initialize() {
  assert(_nworkers != 0, "must be");
  assert(_phase_times == NULL || _nworkers <= _phase_times->max_threads(),
         "nworkers (%u) exceeds max threads (%u)",
         _nworkers, _phase_times->max_threads());

  if (_phase_times) {
    _phase_times->set_active_workers(_nworkers);
  }

  // Allocate raw C-heap storage for one StorageState per OopStorage
  // phase, then construct each state in place with placement new.
  // The matching explicit destructor calls and FREE_C_HEAP_ARRAY are
  // performed in ~Task().
  uint storage_count = WeakProcessorPhases::oop_storage_phase_count;
  _storage_states = NEW_C_HEAP_ARRAY(StorageState, storage_count, mtGC);

  StorageState* states = _storage_states;
  FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(phase) {
    OopStorage* storage = WeakProcessorPhases::oop_storage(phase);
    new (states++) StorageState(storage, _nworkers);
  }
  // Start the StringTable dead-entry count for this cycle at zero;
  // ~Task() calls finish_dead_counter() when processing is complete.
  StringTable::reset_dead_counter();
}
 108 
// Construct a Task without per-phase timing collection.
WeakProcessor::Task::Task(uint nworkers) :
  _phase_times(NULL),
  _nworkers(nworkers),
  _serial_phases_done(WeakProcessorPhases::serial_phase_count),
  _storage_states(NULL)
{
  initialize();
}
 117 
// Construct a Task that records per-phase timing into phase_times
// (which may be NULL, in which case no timing is collected).
WeakProcessor::Task::Task(WeakProcessorPhaseTimes* phase_times, uint nworkers) :
  _phase_times(phase_times),
  _nworkers(nworkers),
  _serial_phases_done(WeakProcessorPhases::serial_phase_count),
  _storage_states(NULL)
{
  initialize();
}
 126 
WeakProcessor::Task::~Task() {
  if (_storage_states != NULL) {
    // Explicitly destroy each placement-new constructed StorageState
    // before releasing the raw C-heap array that backs them.
    StorageState* states = _storage_states;
    FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(phase) {
      states->StorageState::~StorageState();
      ++states;
    }
    FREE_C_HEAP_ARRAY(StorageState, _storage_states);
  }
  // Publish the StringTable dead-entry count accumulated since the
  // reset_dead_counter() call in initialize().
  StringTable::finish_dead_counter();
}
 138 
void WeakProcessor::GangTask::work(uint worker_id) {
  // Dispatch through the type-erased worker function; the concrete
  // implementation is installed elsewhere (not visible in this file) —
  // presumably a template instantiated per closure type.
  _erased_do_work(this, worker_id);
}