1 /*
2 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "memory/allocation.hpp"
28 #include "memory/universe.hpp"
29
30 #include "gc/shared/classUnloadingContext.hpp"
31 #include "gc/shared/gcArguments.hpp"
32 #include "gc/shared/gcTimer.hpp"
33 #include "gc/shared/gcTraceTime.inline.hpp"
34 #include "gc/shared/locationPrinter.inline.hpp"
35 #include "gc/shared/memAllocator.hpp"
36 #include "gc/shared/plab.hpp"
37 #include "gc/shared/tlab_globals.hpp"
38
39 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
40 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
41 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
42 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
43 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
45 #include "gc/shenandoah/shenandoahControlThread.hpp"
46 #include "gc/shenandoah/shenandoahFreeSet.hpp"
47 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
48 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
49 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
50 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
51 #include "gc/shenandoah/shenandoahInitLogger.hpp"
52 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
53 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
54 #include "gc/shenandoah/shenandoahMetrics.hpp"
55 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
56 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
57 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
58 #include "gc/shenandoah/shenandoahPadding.hpp"
59 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
60 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
61 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
62 #include "gc/shenandoah/shenandoahSTWMark.hpp"
63 #include "gc/shenandoah/shenandoahUtils.hpp"
64 #include "gc/shenandoah/shenandoahVerifier.hpp"
65 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
66 #include "gc/shenandoah/shenandoahVMOperations.hpp"
67 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
68 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
69 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
70 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
71 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
72 #if INCLUDE_JFR
73 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
74 #endif
75
76 #include "classfile/systemDictionary.hpp"
77 #include "code/codeCache.hpp"
78 #include "memory/classLoaderMetaspace.hpp"
79 #include "memory/metaspaceUtils.hpp"
80 #include "oops/compressedOops.inline.hpp"
81 #include "prims/jvmtiTagMap.hpp"
82 #include "runtime/atomic.hpp"
83 #include "runtime/globals.hpp"
84 #include "runtime/interfaceSupport.inline.hpp"
85 #include "runtime/java.hpp"
86 #include "runtime/orderAccess.hpp"
87 #include "runtime/safepointMechanism.hpp"
88 #include "runtime/vmThread.hpp"
89 #include "services/mallocTracker.hpp"
90 #include "services/memTracker.hpp"
91 #include "utilities/events.hpp"
92 #include "utilities/powerOfTwo.hpp"
93
94 class ShenandoahPretouchHeapTask : public WorkerTask {
95 private:
96 ShenandoahRegionIterator _regions;
97 const size_t _page_size;
98 public:
99 ShenandoahPretouchHeapTask(size_t page_size) :
100 WorkerTask("Shenandoah Pretouch Heap"),
101 _page_size(page_size) {}
102
103 virtual void work(uint worker_id) {
104 ShenandoahHeapRegion* r = _regions.next();
105 while (r != nullptr) {
106 if (r->is_committed()) {
107 os::pretouch_memory(r->bottom(), r->end(), _page_size);
108 }
109 r = _regions.next();
110 }
111 }
112 };
113
114 class ShenandoahPretouchBitmapTask : public WorkerTask {
115 private:
116 ShenandoahRegionIterator _regions;
117 char* _bitmap_base;
118 const size_t _bitmap_size;
119 const size_t _page_size;
120 public:
121 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
122 WorkerTask("Shenandoah Pretouch Bitmap"),
123 _bitmap_base(bitmap_base),
124 _bitmap_size(bitmap_size),
125 _page_size(page_size) {}
126
127 virtual void work(uint worker_id) {
128 ShenandoahHeapRegion* r = _regions.next();
129 while (r != nullptr) {
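      // The mark bitmap maps the heap linearly: heap_map_factor() is the number of heap
      // bytes covered by one bitmap byte, so [start, end) below are the byte offsets of
      // this region's slice within the bitmap.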
130 size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
131 size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
133
134 if (r->is_committed()) {
135 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
136 }
137
138 r = _regions.next();
139 }
140 }
141 };
142
143 jint ShenandoahHeap::initialize() {
144 //
145 // Figure out heap sizing
146 //
147
148 size_t init_byte_size = InitialHeapSize;
149 size_t min_byte_size = MinHeapSize;
150 size_t max_byte_size = MaxHeapSize;
151 size_t heap_alignment = HeapAlignment;
152
153 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
154
155 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
156 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
157
158 _num_regions = ShenandoahHeapRegion::region_count();
159 assert(_num_regions == (max_byte_size / reg_size_bytes),
160 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
161 _num_regions, max_byte_size, reg_size_bytes);
162
163 // Now we know the number of regions, initialize the heuristics.
164 initialize_heuristics();
165
166 size_t num_committed_regions = init_byte_size / reg_size_bytes;
167 num_committed_regions = MIN2(num_committed_regions, _num_regions);
168 assert(num_committed_regions <= _num_regions, "sanity");
169 _initial_size = num_committed_regions * reg_size_bytes;
170
171 size_t num_min_regions = min_byte_size / reg_size_bytes;
172 num_min_regions = MIN2(num_min_regions, _num_regions);
173 assert(num_min_regions <= _num_regions, "sanity");
174 _minimum_size = num_min_regions * reg_size_bytes;
175
176 // Default to max heap size.
177 _soft_max_size = _num_regions * reg_size_bytes;
178
179 _committed = _initial_size;
180
181 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
182 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
183 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
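  // With -XX:+UseLargePages, all three mappings are backed by large pages. Note that the
  // pretouch page sizes may still be lowered below when transparent huge pages are in use.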
184
185 //
186 // Reserve and commit memory for heap
187 //
188
189 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
190 initialize_reserved_region(heap_rs);
191 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
192 _heap_region_special = heap_rs.special();
193
194 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
195 "Misaligned heap: " PTR_FORMAT, p2i(base()));
196
197 #if SHENANDOAH_OPTIMIZED_MARKTASK
198 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
199 // Fail if we ever attempt to address more than we can.
200 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
201 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
202 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
203 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
204 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
205 vm_exit_during_initialization("Fatal Error", buf);
206 }
207 #endif
208
209 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
210 if (!_heap_region_special) {
211 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
212 "Cannot commit heap memory");
213 }
214
215 //
216 // Reserve and commit memory for bitmap(s)
217 //
218
219 _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
220 _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
221
222 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
223
224 guarantee(bitmap_bytes_per_region != 0,
225 "Bitmap bytes per region should not be zero");
226 guarantee(is_power_of_2(bitmap_bytes_per_region),
227 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
228
229 if (bitmap_page_size > bitmap_bytes_per_region) {
230 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
231 _bitmap_bytes_per_slice = bitmap_page_size;
232 } else {
233 _bitmap_regions_per_slice = 1;
234 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
235 }
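  // For example (illustrative numbers only): with 32 MB regions and a heap map factor of
  // 64 heap bytes per bitmap byte, each region needs 512 KB of bitmap. With 4 KB pages,
  // every region gets its own 512 KB slice; with 2 MB large pages, four regions share
  // one 2 MB slice.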
236
237 guarantee(_bitmap_regions_per_slice >= 1,
238 "Should have at least one region per slice: " SIZE_FORMAT,
239 _bitmap_regions_per_slice);
240
241 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
242 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
243 _bitmap_bytes_per_slice, bitmap_page_size);
244
245 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
246 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
247 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
248 _bitmap_region_special = bitmap.special();
249
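  // Commit only as much of the bitmap as is needed to cover the initially committed
  // regions, rounded up to whole slices: a slice can only be committed as a unit.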
250 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
251 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
252 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
253 if (!_bitmap_region_special) {
254 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
255 "Cannot commit bitmap memory");
256 }
257
258 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
259
260 if (ShenandoahVerify) {
261 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
262 if (!verify_bitmap.special()) {
263 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
264 "Cannot commit verification bitmap memory");
265 }
266 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
267 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
268 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
269 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
270 }
271
272 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
273 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
274 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
275 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
276 _aux_bitmap_region_special = aux_bitmap.special();
277 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
278
279 //
280 // Create regions and region sets
281 //
282 size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
283 size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
284 region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());
285
286 ReservedSpace region_storage(region_storage_size, region_page_size);
287 MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
288 if (!region_storage.special()) {
289 os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
290 "Cannot commit region memory");
291 }
292
  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If that fails, bite the bullet and allocate at whatever address we get.
296 {
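    // The cset map stores one byte per region. It is sized from the end of the heap
    // reservation (heap end >> region size shift), so that the biased map, placed at the
    // reservation base, can be indexed directly by (address >> region size shift).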
297 size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
298 size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
299
300 uintptr_t min = round_up_power_of_2(cset_align);
301 uintptr_t max = (1u << 30u);
302
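    // Probe power-of-two addresses upwards: e.g. with 4 KB pages and allocation granularity,
    // this tries 4 KB, 8 KB, ..., up to 1 GB, and takes the first address the OS grants.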
303 for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
304 char* req_addr = (char*)addr;
305 assert(is_aligned(req_addr, cset_align), "Should be aligned");
306 ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
307 if (cset_rs.is_reserved()) {
308 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
309 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
310 break;
311 }
312 }
313
314 if (_collection_set == nullptr) {
315 ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
316 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
317 }
318 }
319
320 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
321 _free_set = new ShenandoahFreeSet(this, _num_regions);
322
323 {
324 ShenandoahHeapLocker locker(lock());
325
326 for (size_t i = 0; i < _num_regions; i++) {
327 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
328 bool is_committed = i < num_committed_regions;
329 void* loc = region_storage.base() + i * region_align;
330
331 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
332 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
333
334 _marking_context->initialize_top_at_mark_start(r);
335 _regions[i] = r;
336 assert(!collection_set()->is_in(i), "New region should not be in collection set");
337 }
338
339 // Initialize to complete
340 _marking_context->mark_complete();
341
342 _free_set->rebuild();
343 }
344
345 if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
349 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
350
351 _pretouch_heap_page_size = heap_page_size;
352 _pretouch_bitmap_page_size = bitmap_page_size;
353
354 #ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pretouch with smaller pages.
358 if (UseTransparentHugePages) {
359 _pretouch_heap_page_size = (size_t)os::vm_page_size();
360 _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
361 }
362 #endif
363
    // OS memory managers may want to coalesce back-to-back pages. Make their job
    // simpler by pre-touching the contiguous spaces (heap and bitmap) separately.
366
367 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
368 _workers->run_task(&bcl);
369
370 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
371 _workers->run_task(&hcl);
372 }
373
374 //
375 // Initialize the rest of GC subsystems
376 //
377
378 _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
379 for (uint worker = 0; worker < _max_workers; worker++) {
380 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
381 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
382 }
383
384 // There should probably be Shenandoah-specific options for these,
385 // just as there are G1-specific options.
386 {
387 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
388 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
389 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
390 }
391
392 _monitoring_support = new ShenandoahMonitoringSupport(this);
393 _phase_timings = new ShenandoahPhaseTimings(max_workers());
394 ShenandoahCodeRoots::initialize();
395
396 if (ShenandoahPacing) {
397 _pacer = new ShenandoahPacer(this);
398 _pacer->setup_for_idle();
399 } else {
400 _pacer = nullptr;
401 }
402
403 _control_thread = new ShenandoahControlThread();
404
405 ShenandoahInitLogger::print();
406
407 return JNI_OK;
408 }
409
410 void ShenandoahHeap::initialize_mode() {
411 if (ShenandoahGCMode != nullptr) {
412 if (strcmp(ShenandoahGCMode, "satb") == 0) {
413 _gc_mode = new ShenandoahSATBMode();
414 } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
415 _gc_mode = new ShenandoahIUMode();
416 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
417 _gc_mode = new ShenandoahPassiveMode();
418 } else {
419 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
420 }
421 } else {
422 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
423 }
424 _gc_mode->initialize_flags();
425 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
426 vm_exit_during_initialization(
427 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
428 _gc_mode->name()));
429 }
430 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
431 vm_exit_during_initialization(
432 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
433 _gc_mode->name()));
434 }
435 }
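// For reference, the mode is selected on the command line, e.g.:
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb ...
// Diagnostic and experimental modes additionally require the corresponding
// -XX:+UnlockDiagnosticVMOptions / -XX:+UnlockExperimentalVMOptions flags,
// as enforced above.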
436
437 void ShenandoahHeap::initialize_heuristics() {
438 assert(_gc_mode != nullptr, "Must be initialized");
439 _heuristics = _gc_mode->initialize_heuristics();
440
441 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
442 vm_exit_during_initialization(
443 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
444 _heuristics->name()));
445 }
446 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
447 vm_exit_during_initialization(
448 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
449 _heuristics->name()));
450 }
451 }
452
453 #ifdef _MSC_VER
454 #pragma warning( push )
455 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
456 #endif
457
458 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
459 CollectedHeap(),
460 _initial_size(0),
461 _used(0),
462 _committed(0),
463 _bytes_allocated_since_gc_start(0),
464 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
465 _workers(nullptr),
466 _safepoint_workers(nullptr),
467 _heap_region_special(false),
468 _num_regions(0),
469 _regions(nullptr),
470 _update_refs_iterator(this),
471 _gc_state_changed(false),
472 _control_thread(nullptr),
473 _shenandoah_policy(policy),
474 _gc_mode(nullptr),
475 _heuristics(nullptr),
476 _free_set(nullptr),
477 _pacer(nullptr),
478 _verifier(nullptr),
479 _phase_timings(nullptr),
480 _monitoring_support(nullptr),
481 _memory_pool(nullptr),
482 _stw_memory_manager("Shenandoah Pauses"),
483 _cycle_memory_manager("Shenandoah Cycles"),
484 _gc_timer(new ConcurrentGCTimer()),
485 _soft_ref_policy(),
486 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
487 _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
488 _marking_context(nullptr),
489 _bitmap_size(0),
490 _bitmap_regions_per_slice(0),
491 _bitmap_bytes_per_slice(0),
492 _bitmap_region_special(false),
493 _aux_bitmap_region_special(false),
494 _liveness_cache(nullptr),
495 _collection_set(nullptr)
496 {
497 // Initialize GC mode early, so we can adjust barrier support
498 initialize_mode();
499 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
500
501 _max_workers = MAX2(_max_workers, 1U);
502 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
503 if (_workers == nullptr) {
504 vm_exit_during_initialization("Failed necessary allocation.");
505 } else {
506 _workers->initialize_workers();
507 }
508
509 if (ParallelGCThreads > 1) {
510 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
511 ParallelGCThreads);
512 _safepoint_workers->initialize_workers();
513 }
514 }
515
516 #ifdef _MSC_VER
517 #pragma warning( pop )
518 #endif
519
520 class ShenandoahResetBitmapTask : public WorkerTask {
521 private:
522 ShenandoahRegionIterator _regions;
523
524 public:
525 ShenandoahResetBitmapTask() :
526 WorkerTask("Shenandoah Reset Bitmap") {}
527
528 void work(uint worker_id) {
529 ShenandoahHeapRegion* region = _regions.next();
530 ShenandoahHeap* heap = ShenandoahHeap::heap();
531 ShenandoahMarkingContext* const ctx = heap->marking_context();
532 while (region != nullptr) {
533 if (heap->is_bitmap_slice_committed(region)) {
534 ctx->clear_bitmap(region);
535 }
536 region = _regions.next();
537 }
538 }
539 };
540
541 void ShenandoahHeap::reset_mark_bitmap() {
542 assert_gc_workers(_workers->active_workers());
543 mark_incomplete_marking_context();
544
545 ShenandoahResetBitmapTask task;
546 _workers->run_task(&task);
547 }
548
549 void ShenandoahHeap::print_on(outputStream* st) const {
550 st->print_cr("Shenandoah Heap");
551 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
552 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
553 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
554 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
555 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
556 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
557 num_regions(),
558 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
559 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
560
561 st->print("Status: ");
562 if (has_forwarded_objects()) st->print("has forwarded objects, ");
563 if (is_concurrent_mark_in_progress()) st->print("marking, ");
564 if (is_evacuation_in_progress()) st->print("evacuating, ");
565 if (is_update_refs_in_progress()) st->print("updating refs, ");
566 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
567 if (is_full_gc_in_progress()) st->print("full gc, ");
568 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
569 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
570 if (is_concurrent_strong_root_in_progress() &&
571 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
572
573 if (cancelled_gc()) {
574 st->print("cancelled");
575 } else {
576 st->print("not cancelled");
577 }
578 st->cr();
579
580 st->print_cr("Reserved region:");
581 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
582 p2i(reserved_region().start()),
583 p2i(reserved_region().end()));
584
585 ShenandoahCollectionSet* cset = collection_set();
586 st->print_cr("Collection set:");
587 if (cset != nullptr) {
588 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
589 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
590 } else {
591 st->print_cr(" (null)");
592 }
593
594 st->cr();
595 MetaspaceUtils::print_on(st);
596
597 if (Verbose) {
598 st->cr();
599 print_heap_regions_on(st);
600 }
601 }
602
603 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
604 public:
605 void do_thread(Thread* thread) {
606 assert(thread != nullptr, "Sanity");
607 assert(thread->is_Worker_thread(), "Only worker thread expected");
608 ShenandoahThreadLocalData::initialize_gclab(thread);
609 }
610 };
611
612 void ShenandoahHeap::post_initialize() {
613 CollectedHeap::post_initialize();
614 MutexLocker ml(Threads_lock);
615
616 ShenandoahInitWorkerGCLABClosure init_gclabs;
617 _workers->threads_do(&init_gclabs);
618
  // A gclab cannot be initialized early during VM startup, because it cannot determine its max_size yet.
  // Instead, let WorkerThreads initialize the gclab whenever a new worker is created.
621 _workers->set_initialize_gclab();
622 if (_safepoint_workers != nullptr) {
623 _safepoint_workers->threads_do(&init_gclabs);
624 _safepoint_workers->set_initialize_gclab();
625 }
626
627 _heuristics->initialize();
628
629 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
630 }
631
632 size_t ShenandoahHeap::used() const {
633 return Atomic::load(&_used);
634 }
635
636 size_t ShenandoahHeap::committed() const {
637 return Atomic::load(&_committed);
638 }
639
640 void ShenandoahHeap::increase_committed(size_t bytes) {
641 shenandoah_assert_heaplocked_or_safepoint();
642 _committed += bytes;
643 }
644
645 void ShenandoahHeap::decrease_committed(size_t bytes) {
646 shenandoah_assert_heaplocked_or_safepoint();
647 _committed -= bytes;
648 }
649
650 void ShenandoahHeap::increase_used(size_t bytes) {
651 Atomic::add(&_used, bytes, memory_order_relaxed);
652 }
653
654 void ShenandoahHeap::set_used(size_t bytes) {
655 Atomic::store(&_used, bytes);
656 }
657
658 void ShenandoahHeap::decrease_used(size_t bytes) {
659 assert(used() >= bytes, "never decrease heap size by more than we've left");
660 Atomic::sub(&_used, bytes, memory_order_relaxed);
661 }
662
663 void ShenandoahHeap::increase_allocated(size_t bytes) {
664 Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
665 }
666
667 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
668 size_t bytes = words * HeapWordSize;
669 if (!waste) {
670 increase_used(bytes);
671 }
672 increase_allocated(bytes);
673 if (ShenandoahPacing) {
674 control_thread()->pacing_notify_alloc(words);
675 if (waste) {
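      // Forced claim: wasted words must be charged against the pacer budget, even if
      // that takes the budget over its limit.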
676 pacer()->claim_for_alloc(words, true);
677 }
678 }
679 }
680
681 size_t ShenandoahHeap::capacity() const {
682 return committed();
683 }
684
685 size_t ShenandoahHeap::max_capacity() const {
686 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
687 }
688
689 size_t ShenandoahHeap::soft_max_capacity() const {
690 size_t v = Atomic::load(&_soft_max_size);
691 assert(min_capacity() <= v && v <= max_capacity(),
692 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
693 min_capacity(), v, max_capacity());
694 return v;
695 }
696
697 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
698 assert(min_capacity() <= v && v <= max_capacity(),
699 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
700 min_capacity(), v, max_capacity());
701 Atomic::store(&_soft_max_size, v);
702 }
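// The soft max is normally driven by the manageable ShenandoahSoftMaxHeapSize flag
// (read by the control thread), which allows tuning the target capacity at runtime
// without restarting the VM.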
703
704 size_t ShenandoahHeap::min_capacity() const {
705 return _minimum_size;
706 }
707
708 size_t ShenandoahHeap::initial_capacity() const {
709 return _initial_size;
710 }
711
712 bool ShenandoahHeap::is_in(const void* p) const {
713 if (is_in_reserved(p)) {
714 if (is_full_gc_move_in_progress()) {
      // Full GC move is running, we do not have consistent region
      // information yet. But we know the pointer is in the heap.
717 return true;
718 }
719 // Now check if we point to a live section in active region.
720 ShenandoahHeapRegion* r = heap_region_containing(p);
721 return (r->is_active() && p < r->top());
722 } else {
723 return false;
724 }
725 }
726
727 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
728 assert (ShenandoahUncommit, "should be enabled");
729
  // The application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the application
  // keeps enjoying the committed regions near the start. GC allocations are much less
  // frequent, and can therefore absorb the re-committing costs.
734
735 size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // watch for size_t underflow
737 ShenandoahHeapRegion* r = get_region(i - 1);
738 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
739 ShenandoahHeapLocker locker(lock());
740 if (r->is_empty_committed()) {
741 if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
742 break;
743 }
744
745 r->make_uncommitted();
746 count++;
747 }
748 }
749 SpinPause(); // allow allocators to take the lock
750 }
751
752 if (count > 0) {
753 control_thread()->notify_heap_changed();
754 }
755 }
756
757 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
758 // New object should fit the GCLAB size
759 size_t min_size = MAX2(size, PLAB::min_size());
760
761 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
762 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
763 new_size = MIN2(new_size, PLAB::max_size());
764 new_size = MAX2(new_size, PLAB::min_size());
765
  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
769 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
770
771 if (new_size < size) {
772 // New size still does not fit the object. Fall back to shared allocation.
773 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
774 return nullptr;
775 }
776
777 // Retire current GCLAB, and allocate a new one.
778 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
779 gclab->retire();
780
781 size_t actual_size = 0;
782 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
783 if (gclab_buf == nullptr) {
784 return nullptr;
785 }
786
787 assert (size <= actual_size, "allocation should fit");
788
  // ...and clear or zap the just-allocated GCLAB, if needed.
790 if (ZeroTLAB) {
791 Copy::zero_to_words(gclab_buf, actual_size);
792 } else if (ZapTLAB) {
793 // Skip mangling the space corresponding to the object header to
794 // ensure that the returned space is not considered parsable by
795 // any concurrent GC thread.
796 size_t hdr_size = oopDesc::header_size();
797 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
798 }
799 gclab->set_buf(gclab_buf, actual_size);
800 return gclab->allocate(size);
801 }
802
803 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
804 size_t requested_size,
805 size_t* actual_size) {
806 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
807 HeapWord* res = allocate_memory(req);
808 if (res != nullptr) {
809 *actual_size = req.actual_size();
810 } else {
811 *actual_size = 0;
812 }
813 return res;
814 }
815
816 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
817 size_t word_size,
818 size_t* actual_size) {
819 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
820 HeapWord* res = allocate_memory(req);
821 if (res != nullptr) {
822 *actual_size = req.actual_size();
823 } else {
824 *actual_size = 0;
825 }
826 return res;
827 }
828
829 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
830 intptr_t pacer_epoch = 0;
831 bool in_new_region = false;
832 HeapWord* result = nullptr;
833
834 if (req.is_mutator_alloc()) {
835 if (ShenandoahPacing) {
836 pacer()->pace_for_alloc(req.size());
837 pacer_epoch = pacer()->epoch();
838 }
839
840 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
841 result = allocate_memory_under_lock(req, in_new_region);
842 }
843
    // Allocation failed, block until the control thread has reacted, then retry the allocation.
845 //
846 // It might happen that one of the threads requesting allocation would unblock
847 // way later after GC happened, only to fail the second allocation, because
848 // other threads have already depleted the free storage. In this case, a better
849 // strategy is to try again, as long as GC makes progress (or until at least
850 // one full GC has completed).
851 size_t original_count = shenandoah_policy()->full_gc_count();
852 while (result == nullptr
853 && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
854 control_thread()->handle_alloc_failure(req);
855 result = allocate_memory_under_lock(req, in_new_region);
856 }
857 } else {
858 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
859 result = allocate_memory_under_lock(req, in_new_region);
860 // Do not call handle_alloc_failure() here, because we cannot block.
861 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
862 }
863
864 if (in_new_region) {
865 control_thread()->notify_heap_changed();
866 }
867
868 if (result != nullptr) {
869 size_t requested = req.size();
870 size_t actual = req.actual_size();
871
872 assert (req.is_lab_alloc() || (requested == actual),
873 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
874 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
875
876 if (req.is_mutator_alloc()) {
877 notify_mutator_alloc_words(actual, false);
878
      // If we requested more than we were granted, give the rest back to the pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
882 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
883 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
884 }
885 } else {
886 increase_used(actual*HeapWordSize);
887 }
888 }
889
890 return result;
891 }
892
893 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
894 // If we are dealing with mutator allocation, then we may need to block for safepoint.
895 // We cannot block for safepoint for GC allocations, because there is a high chance
896 // we are already running at safepoint or from stack watermark machinery, and we cannot
897 // block again.
898 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
899 return _free_set->allocate(req, in_new_region);
900 }
901
902 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
903 bool* gc_overhead_limit_was_exceeded) {
904 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
905 return allocate_memory(req);
906 }
907
908 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
909 size_t size,
910 Metaspace::MetadataType mdtype) {
911 MetaWord* result;
912
  // Report metaspace OOM to the GC heuristics, if class unloading is possible.
914 if (heuristics()->can_unload_classes()) {
915 ShenandoahHeuristics* h = heuristics();
916 h->record_metaspace_oom();
917 }
918
919 // Expand and retry allocation
920 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
921 if (result != nullptr) {
922 return result;
923 }
924
925 // Start full GC
926 collect(GCCause::_metadata_GC_clear_soft_refs);
927
928 // Retry allocation
929 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
930 if (result != nullptr) {
931 return result;
932 }
933
934 // Expand and retry allocation
935 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
936 if (result != nullptr) {
937 return result;
938 }
939
940 // Out of memory
941 return nullptr;
942 }
943
944 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
945 private:
946 ShenandoahHeap* const _heap;
947 Thread* const _thread;
948 public:
949 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
950 _heap(heap), _thread(Thread::current()) {}
951
952 void do_object(oop p) {
953 shenandoah_assert_marked(nullptr, p);
954 if (!p->is_forwarded()) {
955 _heap->evacuate_object(p, _thread);
956 }
957 }
958 };
959
960 class ShenandoahEvacuationTask : public WorkerTask {
961 private:
962 ShenandoahHeap* const _sh;
963 ShenandoahCollectionSet* const _cs;
964 bool _concurrent;
965 public:
966 ShenandoahEvacuationTask(ShenandoahHeap* sh,
967 ShenandoahCollectionSet* cs,
968 bool concurrent) :
969 WorkerTask("Shenandoah Evacuation"),
970 _sh(sh),
971 _cs(cs),
972 _concurrent(concurrent)
973 {}
974
975 void work(uint worker_id) {
976 if (_concurrent) {
977 ShenandoahConcurrentWorkerSession worker_session(worker_id);
978 ShenandoahSuspendibleThreadSetJoiner stsj;
979 ShenandoahEvacOOMScope oom_evac_scope;
980 do_work();
981 } else {
982 ShenandoahParallelWorkerSession worker_session(worker_id);
983 ShenandoahEvacOOMScope oom_evac_scope;
984 do_work();
985 }
986 }
987
988 private:
989 void do_work() {
990 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
991 ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
993 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
994 _sh->marked_object_iterate(r, &cl);
995
996 if (ShenandoahPacing) {
997 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
998 }
999
1000 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1001 break;
1002 }
1003 }
1004 }
1005 };
1006
1007 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1008 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1009 workers()->run_task(&task);
1010 }
1011
1012 void ShenandoahHeap::trash_cset_regions() {
1013 ShenandoahHeapLocker locker(lock());
1014
1015 ShenandoahCollectionSet* set = collection_set();
1016 ShenandoahHeapRegion* r;
1017 set->clear_current_index();
1018 while ((r = set->next()) != nullptr) {
1019 r->make_trash();
1020 }
1021 collection_set()->clear();
1022 }
1023
1024 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1025 st->print_cr("Heap Regions:");
1026 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1027 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1028 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1029 st->print_cr("UWM=update watermark, U=used");
1030 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1031 st->print_cr("S=shared allocs, L=live data");
1032 st->print_cr("CP=critical pins");
1033
1034 for (size_t i = 0; i < num_regions(); i++) {
1035 get_region(i)->print_on(st);
1036 }
1037 }
1038
1039 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
1040 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1041 assert(!start->has_live(), "liveness must be zero");
1042
1043 // Do not try to get the size of this humongous object. STW collections will
1044 // have already unloaded classes, so an unmarked object may have a bad klass pointer.
1045 ShenandoahHeapRegion* region = start;
1046 size_t index = region->index();
1047 do {
1048 assert(region->is_humongous(), "Expect correct humongous start or continuation");
1049 assert(!region->is_cset(), "Humongous region should not be in collection set");
1050 region->make_trash_immediate();
1051 region = get_region(++index);
1052 } while (region != nullptr && region->is_humongous_continuation());
1053
1054 // Return number of regions trashed
1055 return index - start->index();
1056 }
1057
1058 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1059 public:
1060 ShenandoahCheckCleanGCLABClosure() {}
1061 void do_thread(Thread* thread) {
1062 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1063 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1064 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1065 }
1066 };
1067
1068 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1069 private:
1070 bool const _resize;
1071 public:
1072 ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1073 void do_thread(Thread* thread) {
1074 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1075 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1076 gclab->retire();
1077 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1078 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1079 }
1080 }
1081 };
1082
1083 void ShenandoahHeap::labs_make_parsable() {
1084 assert(UseTLAB, "Only call with UseTLAB");
1085
1086 ShenandoahRetireGCLABClosure cl(false);
1087
1088 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1089 ThreadLocalAllocBuffer& tlab = t->tlab();
1090 tlab.make_parsable();
1091 cl.do_thread(t);
1092 }
1093
1094 workers()->threads_do(&cl);
1095 }
1096
1097 void ShenandoahHeap::tlabs_retire(bool resize) {
1098 assert(UseTLAB, "Only call with UseTLAB");
1099 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1100
1101 ThreadLocalAllocStats stats;
1102
1103 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1104 ThreadLocalAllocBuffer& tlab = t->tlab();
1105 tlab.retire(&stats);
1106 if (resize) {
1107 tlab.resize();
1108 }
1109 }
1110
1111 stats.publish();
1112
1113 #ifdef ASSERT
1114 ShenandoahCheckCleanGCLABClosure cl;
1115 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1116 cl.do_thread(t);
1117 }
1118 workers()->threads_do(&cl);
1119 #endif
1120 }
1121
1122 void ShenandoahHeap::gclabs_retire(bool resize) {
1123 assert(UseTLAB, "Only call with UseTLAB");
1124 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1125
1126 ShenandoahRetireGCLABClosure cl(resize);
1127 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1128 cl.do_thread(t);
1129 }
1130 workers()->threads_do(&cl);
1131
1132 if (safepoint_workers() != nullptr) {
1133 safepoint_workers()->threads_do(&cl);
1134 }
1135 }
1136
1137 // Returns size in bytes
1138 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1139 // Return the max allowed size, and let the allocation path
1140 // figure out the safe size for current allocation.
1141 return ShenandoahHeapRegion::max_tlab_size_bytes();
1142 }
1143
1144 size_t ShenandoahHeap::max_tlab_size() const {
1145 // Returns size in words
1146 return ShenandoahHeapRegion::max_tlab_size_words();
1147 }
1148
1149 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1150 // These requests are ignored because we can't easily have Shenandoah jump into
1151 // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1152 // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1153 // on the VM thread, but this would confuse the control thread mightily and doesn't
1154 // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1155 // concurrent cycle in the prologue of the heap inspect/dump operation. This is how
1156 // other concurrent collectors in the JVM handle this scenario as well.
1157 assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1158 guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1159 }
1160
1161 void ShenandoahHeap::collect(GCCause::Cause cause) {
1162 control_thread()->request_gc(cause);
1163 }
1164
1165 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1166 //assert(false, "Shouldn't need to do full collections");
1167 }
1168
1169 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1170 ShenandoahHeapRegion* r = heap_region_containing(addr);
1171 if (r != nullptr) {
1172 return r->block_start(addr);
1173 }
1174 return nullptr;
1175 }
1176
1177 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1178 ShenandoahHeapRegion* r = heap_region_containing(addr);
1179 return r->block_is_obj(addr);
1180 }
1181
1182 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1183 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1184 }
1185
1186 void ShenandoahHeap::prepare_for_verify() {
1187 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1188 labs_make_parsable();
1189 }
1190 }
1191
1192 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1193 tcl->do_thread(_control_thread);
1194 workers()->threads_do(tcl);
1195 if (_safepoint_workers != nullptr) {
1196 _safepoint_workers->threads_do(tcl);
1197 }
1198 }
1199
1200 void ShenandoahHeap::print_tracing_info() const {
1201 LogTarget(Info, gc, stats) lt;
1202 if (lt.is_enabled()) {
1203 ResourceMark rm;
1204 LogStream ls(lt);
1205
1206 phase_timings()->print_global_on(&ls);
1207
1208 ls.cr();
1209 ls.cr();
1210
1211 shenandoah_policy()->print_gc_stats(&ls);
1212
1213 ls.cr();
1214 ls.cr();
1215 }
1216 }
1217
1218 void ShenandoahHeap::verify(VerifyOption vo) {
1219 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1220 if (ShenandoahVerify) {
1221 verifier()->verify_generic(vo);
1222 } else {
1223 // TODO: Consider allocating verification bitmaps on demand,
1224 // and turn this on unconditionally.
1225 }
1226 }
1227 }
1228 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1229 return _free_set->capacity();
1230 }
1231
1232 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1233 private:
1234 MarkBitMap* _bitmap;
1235 ShenandoahScanObjectStack* _oop_stack;
1236 ShenandoahHeap* const _heap;
1237 ShenandoahMarkingContext* const _marking_context;
1238
1239 template <class T>
1240 void do_oop_work(T* p) {
1241 T o = RawAccess<>::oop_load(p);
1242 if (!CompressedOops::is_null(o)) {
1243 oop obj = CompressedOops::decode_not_null(o);
1244 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase, do not touch them.
1246 return;
1247 }
1248 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1249
1250 assert(oopDesc::is_oop(obj), "must be a valid oop");
1251 if (!_bitmap->is_marked(obj)) {
1252 _bitmap->mark(obj);
1253 _oop_stack->push(obj);
1254 }
1255 }
1256 }
1257 public:
1258 ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1259 _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1260 _marking_context(_heap->marking_context()) {}
1261 void do_oop(oop* p) { do_oop_work(p); }
1262 void do_oop(narrowOop* p) { do_oop_work(p); }
1263 };
1264
/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see the comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans that
 * we can control, we call SH::tlabs_retire, SH::gclabs_retire.
 */
1271 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1272 // No-op.
1273 }
1274
1275 /*
1276 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1277 *
1278 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1279 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1280 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1281 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1282 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1283 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1284 * wiped the bitmap in preparation for next marking).
1285 *
1286 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1287 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1288 * is allowed to report dead objects, but is not required to do so.
1289 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }
1294
1295 ShenandoahScanObjectStack oop_stack;
1296 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1297 // Seed the stack with root scan
1298 scan_roots_for_iteration(&oop_stack, &oops);
1299
1300 // Work through the oop stack to traverse heap
1301 while (! oop_stack.is_empty()) {
1302 oop obj = oop_stack.pop();
1303 assert(oopDesc::is_oop(obj), "must be a valid oop");
1304 cl->do_object(obj);
1305 obj->oop_iterate(&oops);
1306 }
1307
1308 assert(oop_stack.is_empty(), "should be empty");
1309 // Reclaim bitmap
1310 reclaim_aux_bitmap_for_iteration();
1311 }
1312
1313 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1314 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1315
1316 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1317 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1318 return false;
1319 }
1320 // Reset bitmap
1321 _aux_bit_map.clear();
1322 return true;
1323 }
1324
1325 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
1330 uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1331 ShenandoahHeapIterationRootScanner rp(n_workers);
1332 rp.roots_do(oops);
1333 }
1334
1335 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1336 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1337 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1338 }
1339 }
1340
// Closure for parallel object iteration
1342 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1343 private:
1344 MarkBitMap* _bitmap;
1345 ShenandoahObjToScanQueue* _queue;
1346 ShenandoahHeap* const _heap;
1347 ShenandoahMarkingContext* const _marking_context;
1348
1349 template <class T>
1350 void do_oop_work(T* p) {
1351 T o = RawAccess<>::oop_load(p);
1352 if (!CompressedOops::is_null(o)) {
1353 oop obj = CompressedOops::decode_not_null(o);
1354 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase, do not touch them.
1356 return;
1357 }
1358 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1359
1360 assert(oopDesc::is_oop(obj), "Must be a valid oop");
1361 if (_bitmap->par_mark(obj)) {
1362 _queue->push(ShenandoahMarkTask(obj));
1363 }
1364 }
1365 }
1366 public:
1367 ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1368 _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1369 _marking_context(_heap->marking_context()) {}
1370 void do_oop(oop* p) { do_oop_work(p); }
1371 void do_oop(narrowOop* p) { do_oop_work(p); }
1372 };
1373
// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as preparation for the
// parallel marking queues.
// Every worker processes its own marking queue. Work-stealing is used
// to balance the workload.
1379 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1380 private:
1381 uint _num_workers;
1382 bool _init_ready;
1383 MarkBitMap* _aux_bit_map;
1384 ShenandoahHeap* _heap;
1385 ShenandoahScanObjectStack _roots_stack; // global roots stack
1386 ShenandoahObjToScanQueueSet* _task_queues;
1387 public:
1388 ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1389 _num_workers(num_workers),
1390 _init_ready(false),
1391 _aux_bit_map(bitmap),
1392 _heap(ShenandoahHeap::heap()) {
1393 // Initialize bitmap
1394 _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1395 if (!_init_ready) {
1396 return;
1397 }
1398
1399 ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1400 _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1401
1402 _init_ready = prepare_worker_queues();
1403 }
1404
1405 ~ShenandoahParallelObjectIterator() {
1406 // Reclaim bitmap
1407 _heap->reclaim_aux_bitmap_for_iteration();
1408 // Reclaim queue for workers
    if (_task_queues != nullptr) {
1410 for (uint i = 0; i < _num_workers; ++i) {
1411 ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1412 if (q != nullptr) {
1413 delete q;
1414 _task_queues->register_queue(i, nullptr);
1415 }
1416 }
1417 delete _task_queues;
1418 _task_queues = nullptr;
1419 }
1420 }
1421
1422 virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1423 if (_init_ready) {
1424 object_iterate_parallel(cl, worker_id, _task_queues);
1425 }
1426 }
1427
1428 private:
1429 // Divide global root_stack into worker queues
1430 bool prepare_worker_queues() {
1431 _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
1433 for (uint i = 0; i < _num_workers; ++i) {
1434 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1435 _task_queues->register_queue(i, task_queue);
1436 }
    // Divide the roots among the workers. Assume that the distribution of object references
    // correlates with root kind; use round-robin so that every worker has the same chance
    // to process every kind of root.
1440 size_t roots_num = _roots_stack.size();
1441 if (roots_num == 0) {
1442 // No work to do
1443 return false;
1444 }
1445
1446 for (uint j = 0; j < roots_num; j++) {
1447 uint stack_id = j % _num_workers;
1448 oop obj = _roots_stack.pop();
1449 _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1450 }
1451 return true;
1452 }
1453
1454 void object_iterate_parallel(ObjectClosure* cl,
1455 uint worker_id,
1456 ShenandoahObjToScanQueueSet* queue_set) {
1457 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1458 assert(queue_set != nullptr, "task queue must not be null");
1459
1460 ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1461 assert(q != nullptr, "object iterate queue must not be null");
1462
1463 ShenandoahMarkTask t;
1464 ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1465
    // Work through the queue to traverse the heap.
    // Steal when there is no task left in our own queue.
1468 while (q->pop(t) || queue_set->steal(worker_id, t)) {
1469 oop obj = t.obj();
1470 assert(oopDesc::is_oop(obj), "must be a valid oop");
1471 cl->do_object(obj);
1472 obj->oop_iterate(&oops);
1473 }
1474 assert(q->is_empty(), "should be empty");
1475 }
1476 };
1477
1478 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1479 return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1480 }
1481
1482 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1483 void ShenandoahHeap::keep_alive(oop obj) {
1484 if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1485 ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1486 }
1487 }
1488
1489 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1490 for (size_t i = 0; i < num_regions(); i++) {
1491 ShenandoahHeapRegion* current = get_region(i);
1492 blk->heap_region_do(current);
1493 }
1494 }
1495
class ShenandoahParallelHeapRegionTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;
  size_t const _stride;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
          WorkerTask("Shenandoah Parallel Region Operation"),
          _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    size_t stride = _stride;

    size_t max = _heap->num_regions();
    while (Atomic::load(&_index) < max) {
      size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
      size_t start = cur;
      size_t end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (size_t i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region(i);
        _blk->heap_region_do(current);
      }
    }
  }
};

void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  const uint active_workers = workers()->active_workers();
  const size_t n_regions = num_regions();
  size_t stride = ShenandoahParallelRegionStride;
  if (stride == 0 && active_workers > 1) {
    // Automatically derive the stride to balance the work between threads
    // evenly. Do not try to split the work if below a reasonable threshold.
    constexpr size_t threshold = 4096;
    stride = n_regions <= threshold ?
            threshold :
            (n_regions + active_workers - 1) / active_workers;
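    // Illustrative numbers: with 16384 regions and 8 active workers, the stride
    // becomes (16384 + 7) / 8 = 2048, so each atomic claim hands a worker one
    // chunk of 2048 consecutive regions. With only 2048 regions, the stride stays
    // at the 4096 threshold, and iteration falls back to the serial path below.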
  }

  if (n_regions > stride && active_workers > 1) {
    ShenandoahParallelHeapRegionTask task(blk, stride);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

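// TAMS (top-at-mark-start) is the boundary recorded when marking starts: objects
// allocated at or above TAMS after that point are implicitly live.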
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have already updated it
      // during concurrent reset, so it is very likely we do not need another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

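// The handshake closure below is intentionally a no-op: executing it forces every
// Java thread through a handshake poll, which guarantees that all threads have
// observed whatever global state was published before the rendezvous.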
class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads(const char* name) {
  ShenandoahRendezvousClosure cl(name);
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We recheck these under the pause
      // anyway, to capture any updates that happen in the meantime.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live; adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll the complete bitmap.
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set; make sure it knows about the
      // current pinning status. This also allows trashing regions whose pinned
      // status has since been dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember the limit for updating refs. It is guaranteed that no
      // from-space references are written from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(lock());
    _collection_set->clear();
    heuristics()->choose_collection_set(_collection_set);
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under the update
  // watermark (URWM), so we need to make them parsable for the update code to work
  // correctly. Plus, we can compute new sizes for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

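// The gc state is published in two steps: set_gc_state() flips the global state
// at a safepoint and records that it changed; the propagation below then mirrors
// the new state into each Java thread's thread-local copy, which the barrier fast
// paths read, so all threads observe a consistent snapshot.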
void ShenandoahHeap::propagate_gc_state_to_java_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    _gc_state_changed = false;
    char state = gc_state();
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
      ShenandoahThreadLocalData::set_gc_state(t, state);
    }
  }
}

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

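// Atomically moves the cancellation flag from CANCELLABLE to CANCELLED; only the
// single caller that wins the CAS gets true, so the cancellation is logged and
// reported exactly once.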
bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even while GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until GC worker exits normally.
  control_thread()->stop();
}

void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;
  ClassUnloadingContext ctx(_workers->active_workers(),
                            true /* unregister_nmethods_during_purge */,
                            false /* lock_codeblob_free_separately */);

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahPhaseTimings::Phase phase = full_gc ?
                                          ShenandoahPhaseTimings::full_gc_purge_class_unload :
                                          ShenandoahPhaseTimings::degen_gc_purge_class_unload;
    ShenandoahIsAliveSelector is_alive;
    {
      CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
      ShenandoahGCPhase gc_phase(phase);
      ShenandoahGCWorkerPhase worker_phase(phase);
      bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      uint num_workers = _workers->active_workers();
      ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
      _workers->run_task(&unlink_task);
    }
    // Release the memory of unloaded nmethods.
    ClassUnloadingContext::context()->purge_and_free_nmethods();
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::degen_gc_purge_cldg);
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  DEBUG_ONLY(MetaspaceUtils::verify();)
}

// Weak roots are either pre-evacuated (final mark) or updated (final update-refs),
// so they should not have forwarded oops.
// However, we do need to null out dead oops in the roots, if that cannot be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
  uint num_workers = _workers->active_workers();
  ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
                                               ShenandoahPhaseTimings::full_gc_purge_weak_par :
                                               ShenandoahPhaseTimings::degen_gc_purge_weak_par;
  ShenandoahGCPhase phase(timing_phase);
  ShenandoahGCWorkerPhase worker_phase(timing_phase);
  // Cleanup weak roots
  if (has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahUpdateRefsClosure keep_alive;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
      cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
    _workers->run_task(&cleaning_task);
  } else {
    ShenandoahIsAliveClosure is_alive;
#ifdef ASSERT
    ShenandoahAssertNotForwardedClosure verify_cl;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
      cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
#else
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
      cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
#endif
    _workers->run_task(&cleaning_task);
  }
}

void ShenandoahHeap::parallel_cleaning(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
  ShenandoahGCPhase phase(full_gc ?
                          ShenandoahPhaseTimings::full_gc_purge :
                          ShenandoahPhaseTimings::degen_gc_purge);
  stw_weak_refs(full_gc);
  stw_process_weak_roots(full_gc);
  stw_unload_classes(full_gc);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

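// Object pinning is implemented at region granularity: pinning an object (e.g. for
// a JNI critical section) bumps a pin count on its containing region. The actual
// region state transitions (make_pinned/make_unpinned) happen later, under the
// heap lock, in sync_pinned_region_status() or in the region-state closures above.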
void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapRegion* r = heap_region_containing(o);
  assert(r != nullptr, "Sanity");
  assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
  r->record_unpin();
}

void ShenandoahHeap::sync_pinned_region_status() {
  ShenandoahHeapLocker locker(lock());

  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion *r = get_region(i);
    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
           "Region " SIZE_FORMAT " pinning status is inconsistent", i);
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  if (unload_classes()) {
    _unloader.finish();
  }
}

#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (UseDynamicNumberOfGCThreads) {
      assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
    } else {
      // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
    }
  } else {
    if (UseDynamicNumberOfGCThreads) {
      assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
    } else {
      // Use ConcGCThreads outside safepoints
      assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
    }
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != nullptr, "sanity");
  return _verifier;
}

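// Update-references task: walks marked objects up to each region's update
// watermark and rewrites their fields to point to to-space copies. The CONCURRENT
// template parameter selects the closure flavor and, in the concurrent case,
// joins the suspendible thread set so the workers can yield to safepoints.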
template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>();
    }
  }

private:
  template<class T>
  void do_work() {
    T cl;
    ShenandoahHeapRegion* r = _regions->next();
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert (update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
      }
      if (ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}


class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that do not have CP marks
    // anymore, as this allows trashing them.

    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::update_heap_region_states(bool concurrent) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
    ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
    trash_cset_regions();
  }
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  st->cr();
  print_heap_regions_on(st);
}

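// Marking bitmap memory is committed in slices that can back several regions
// (_bitmap_regions_per_slice). A slice is committed when the first region in the
// group needs it, and may be uncommitted only once no region in the group is
// committed, which is what the helper below checks.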
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->index() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert (g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->index()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }
  }
  return false;
}

bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need commits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed; we exit right away.
    return true;
  }

  // Commit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  char* start = (char*) _bitmap_region.start() + off;

  if (!os::commit_memory(start, len, false)) {
    return false;
  }

  if (AlwaysPreTouch) {
    os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
  }

  return true;
}

bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need uncommits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
    return true;
  }

  // Uncommit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
    return false;
  }
  return true;
}

void ShenandoahHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ShenandoahHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
  static const char *msg = "Concurrent uncommit";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
  EventMark em("%s", msg);

  op_uncommit(shrink_before, shrink_until);
}

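// Diagnostic support: with ShenandoahAllocFailureALot enabled, roughly 5% of the
// calls ((os::random() % 1000) > 950) arm the failure flag; an allocation that
// later consumes the flag via should_inject_alloc_failure() is treated as if it
// had failed.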
void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    os::naked_short_sleep(1);
    if (cancelled_gc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}

bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}

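// Serviceability support: the heap is exposed as a single MemoryPool, shared by
// two GCMemoryManagers (concurrent cycles and STW pauses) for the management API.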
void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return _memory_pool->get_memory_usage();
}

ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

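// Per-worker liveness caches accumulate live data per region during marking, so
// workers do not contend on the shared region counters. The caches are handed out
// zeroed, and flush_liveness_cache() folds them back into the regions (and
// re-zeroes them) when the worker is done.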
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
  assert(_liveness_cache != nullptr, "sanity");
  assert(worker_id < _max_workers, "sanity");
  for (uint i = 0; i < num_regions(); i++) {
    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
  }
#endif
  return _liveness_cache[worker_id];
}

void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
  assert(worker_id < _max_workers, "sanity");
  assert(_liveness_cache != nullptr, "sanity");
  ShenandoahLiveData* ld = _liveness_cache[worker_id];
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahLiveData live = ld[i];
    if (live > 0) {
      ShenandoahHeapRegion* r = get_region(i);
      r->increase_live_data_gc_words(live);
      ld[i] = 0;
    }
  }
}

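// Used by the continuations (Loom) runtime to decide whether a stack chunk may be
// accessed without GC barriers.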
bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) return false;

  // Objects allocated after marking started are implicitly alive and do not need
  // any barriers during the marking phase.
  if (is_concurrent_mark_in_progress() &&
      !marking_context()->allocated_after_mark_start(obj)) {
    return true;
  }

  // Cannot guarantee that obj is deeply good, i.e. that it holds no from-space references.
  if (has_forwarded_objects()) {
    return true;
  }

  return false;
}