1 /*
  2  * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/parallel/objectStartArray.hpp"
 27 #include "gc/parallel/parMarkBitMap.inline.hpp"
 28 #include "gc/parallel/parallelScavengeHeap.hpp"
 29 #include "gc/parallel/psCompactionManager.inline.hpp"
 30 #include "gc/parallel/psOldGen.hpp"
 31 #include "gc/parallel/psParallelCompact.inline.hpp"
 32 #include "gc/shared/taskqueue.inline.hpp"
 33 #include "logging/log.hpp"
 34 #include "memory/iterator.inline.hpp"
 35 #include "oops/access.inline.hpp"
 36 #include "oops/compressedOops.inline.hpp"
 37 #include "oops/flatArrayKlass.inline.hpp"
 38 #include "oops/instanceKlass.inline.hpp"
 39 #include "oops/instanceMirrorKlass.inline.hpp"
 40 #include "oops/objArrayKlass.inline.hpp"
 41 #include "oops/oop.inline.hpp"
 42 
// Shared static state for all compaction managers.  _manager_array holds one
// ParCompactionManager per parallel GC worker; it and the queue sets are
// created in initialize().  _old_gen/_start_array are (re)cached in the
// constructor from the ParallelScavengeHeap.
PSOldGen*               ParCompactionManager::_old_gen = nullptr;
ParCompactionManager**  ParCompactionManager::_manager_array = nullptr;

// Work-stealing queue sets; each worker's local queues are registered with
// these in initialize() so idle workers can steal tasks.
ParCompactionManager::OopTaskQueueSet*      ParCompactionManager::_oop_task_queues = nullptr;
ParCompactionManager::ObjArrayTaskQueueSet* ParCompactionManager::_objarray_task_queues = nullptr;
ParCompactionManager::RegionTaskQueueSet*   ParCompactionManager::_region_task_queues = nullptr;

ObjectStartArray*    ParCompactionManager::_start_array = nullptr;
ParMarkBitMap*       ParCompactionManager::_mark_bitmap = nullptr;
// Pool of available shadow regions, guarded by _shadow_region_monitor
// (see push/pop_shadow_region_mt_safe).
GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = nullptr;
Monitor*                ParCompactionManager::_shadow_region_monitor = nullptr;
 54 
 55 ParCompactionManager::ParCompactionManager() {
 56 
 57   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 58 
 59   _old_gen = heap->old_gen();
 60   _start_array = old_gen()->start_array();
 61 
 62   reset_bitmap_query_cache();
 63 
 64   _deferred_obj_array = new (mtGC) GrowableArray<HeapWord*>(10, mtGC);
 65 }
 66 
 67 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
 68   assert(ParallelScavengeHeap::heap() != nullptr,
 69     "Needed for initialization");
 70 
 71   _mark_bitmap = mbm;
 72 
 73   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
 74 
 75   assert(_manager_array == nullptr, "Attempt to initialize twice");
 76   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads, mtGC);
 77 
 78   _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
 79   _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
 80   _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);
 81 
 82   // Create and register the ParCompactionManager(s) for the worker threads.
 83   for(uint i=0; i<parallel_gc_threads; i++) {
 84     _manager_array[i] = new ParCompactionManager();
 85     oop_task_queues()->register_queue(i, _manager_array[i]->oop_stack());
 86     _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
 87     region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
 88   }
 89 
 90   assert(ParallelScavengeHeap::heap()->workers().max_workers() != 0,
 91     "Not initialized?");
 92 
 93   _shadow_region_array = new (mtGC) GrowableArray<size_t >(10, mtGC);
 94 
 95   _shadow_region_monitor = new Monitor(Mutex::nosafepoint, "CompactionManager_lock");
 96 }
 97 
 98 void ParCompactionManager::reset_all_bitmap_query_caches() {
 99   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
100   for (uint i=0; i<parallel_gc_threads; i++) {
101     _manager_array[i]->reset_bitmap_query_cache();
102   }
103 }
104 
105 void ParCompactionManager::flush_all_string_dedup_requests() {
106   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
107   for (uint i=0; i<parallel_gc_threads; i++) {
108     _manager_array[i]->flush_string_dedup_requests();
109   }
110 }
111 
112 ParCompactionManager*
113 ParCompactionManager::gc_thread_compaction_manager(uint index) {
114   assert(index < ParallelGCThreads, "index out of range");
115   assert(_manager_array != nullptr, "Sanity");
116   return _manager_array[index];
117 }
118 
// Drain this worker's oop marking stack.  Overflow entries are first moved
// onto the shared task queue so that other workers can steal them; an entry
// that cannot be pushed (queue full) is processed directly instead.  The
// local portion of the queue is then drained.
inline void ParCompactionManager::publish_and_drain_oop_tasks() {
  oop obj;
  // Publish overflow entries for stealing; process in place if the
  // shared queue has no room.
  while (oop_stack()->pop_overflow(obj)) {
    if (!oop_stack()->try_push_to_taskqueue(obj)) {
      follow_contents(obj);
    }
  }
  // Drain the local (stealable) portion.
  while (oop_stack()->pop_local(obj)) {
    follow_contents(obj);
  }
}
130 
// Move objarray tasks from the overflow stack onto the shared task queue so
// other workers can steal them.  If the shared queue is full, the task that
// failed to publish is left in 'task' and true is returned so the caller can
// process it directly.  Returns false once the overflow stack is empty (in
// which case 'task' holds no valid work).
bool ParCompactionManager::publish_or_pop_objarray_tasks(ObjArrayTask& task) {
  while (_objarray_stack.pop_overflow(task)) {
    if (!_objarray_stack.try_push_to_taskqueue(task)) {
      return true;
    }
  }
  return false;
}
139 
// Process this worker's marking work until both the oop stack and the
// objarray stack are empty.  Oop tasks and objarray tasks can generate work
// for each other, so the outer loop repeats until a full pass finds both
// stacks empty.
void ParCompactionManager::follow_marking_stacks() {
  do {
    // First, try to move tasks from the overflow stack into the shared buffer, so
    // that other threads can steal. Otherwise process the overflow stack first.
    publish_and_drain_oop_tasks();

    // Process ObjArrays one at a time to avoid marking stack bloat.
    // publish_or_pop_objarray_tasks() yields a task only when the shared
    // queue was full; otherwise fall back to the local objarray queue.
    ObjArrayTask task;
    if (publish_or_pop_objarray_tasks(task) ||
        _objarray_stack.pop_local(task)) {
      follow_array((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}
156 
157 void ParCompactionManager::drain_region_stacks() {
158   do {
159     // Drain overflow stack first so other threads can steal.
160     size_t region_index;
161     while (region_stack()->pop_overflow(region_index)) {
162       PSParallelCompact::fill_and_update_region(this, region_index);
163     }
164 
165     while (region_stack()->pop_local(region_index)) {
166       PSParallelCompact::fill_and_update_region(this, region_index);
167     }
168   } while (!region_stack()->is_empty());
169 }
170 
171 void ParCompactionManager::drain_deferred_objects() {
172   while (!_deferred_obj_array->is_empty()) {
173     HeapWord* addr = _deferred_obj_array->pop();
174     assert(addr != nullptr, "expected a deferred object");
175     PSParallelCompact::update_deferred_object(this, addr);
176   }
177   _deferred_obj_array->clear_and_deallocate();
178 }
179 
// Pop a shadow region from the shared pool, blocking (with a short timed
// wait) until either a shadow region becomes available or the destination
// region 'region_ptr' itself has been claimed.  Returns the shadow region
// index, or InvalidShadow when no shadow region is needed anymore.
size_t ParCompactionManager::pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  while (true) {
    if (!_shadow_region_array->is_empty()) {
      return _shadow_region_array->pop();
    }
    // Check if the corresponding heap region is available now.
    // If so, we don't need to get a shadow region anymore, and
    // we return InvalidShadow to indicate such a case.
    if (region_ptr->claimed()) {
      return InvalidShadow;
    }
    // Short timed wait: re-check claimed() periodically even if no
    // push_shadow_region_mt_safe() notification arrives.
    ml.wait(1);
  }
}
195 
196 void ParCompactionManager::push_shadow_region_mt_safe(size_t shadow_region) {
197   MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
198   _shadow_region_array->push(shadow_region);
199   ml.notify();
200 }
201 
202 void ParCompactionManager::push_shadow_region(size_t shadow_region) {
203   _shadow_region_array->push(shadow_region);
204 }
205 
206 void ParCompactionManager::remove_all_shadow_regions() {
207   _shadow_region_array->clear();
208 }
209 
210 void ParCompactionManager::push_deferred_object(HeapWord* addr) {
211   _deferred_obj_array->push(addr);
212 }
213 
#ifdef ASSERT
// Debug-only: assert that no worker has pending marking work.
void ParCompactionManager::verify_all_marking_stack_empty() {
  const uint num_workers = ParallelGCThreads;
  for (uint worker = 0; worker < num_workers; worker++) {
    assert(_manager_array[worker]->marking_stacks_empty(), "Marking stack should be empty");
  }
}

// Debug-only: assert that no worker has pending region-filling work.
void ParCompactionManager::verify_all_region_stack_empty() {
  const uint num_workers = ParallelGCThreads;
  for (uint worker = 0; worker < num_workers; worker++) {
    assert(_manager_array[worker]->region_stack()->is_empty(), "Region stack should be empty");
  }
}
#endif