1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27
28 #include "cds/archiveHeapWriter.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/classUnloadingContext.hpp"
31 #include "gc/shared/fullGCForwarding.inline.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/gc_globals.hpp"
36 #include "gc/shared/locationPrinter.inline.hpp"
37 #include "gc/shared/memAllocator.hpp"
38 #include "gc/shared/plab.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
42 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
43 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
44 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
45 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
46 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
47 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
48 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
49 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
50 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
51 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
52 #include "gc/shenandoah/shenandoahControlThread.hpp"
53 #include "gc/shenandoah/shenandoahFreeSet.hpp"
54 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
55 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
56 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
57 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
58 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
59 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
60 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
61 #include "gc/shenandoah/shenandoahInitLogger.hpp"
62 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
63 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
64 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
65 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
66 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
67 #include "gc/shenandoah/shenandoahPadding.hpp"
68 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
69 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
70 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
71 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
72 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
73 #include "gc/shenandoah/shenandoahSTWMark.hpp"
74 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
75 #include "gc/shenandoah/shenandoahUtils.hpp"
76 #include "gc/shenandoah/shenandoahVerifier.hpp"
77 #include "gc/shenandoah/shenandoahVMOperations.hpp"
78 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
79 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
80 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
81 #include "memory/allocation.hpp"
82 #include "memory/classLoaderMetaspace.hpp"
83 #include "memory/memoryReserver.hpp"
84 #include "memory/metaspaceUtils.hpp"
85 #include "memory/universe.hpp"
86 #include "nmt/mallocTracker.hpp"
87 #include "nmt/memTracker.hpp"
88 #include "oops/compressedOops.inline.hpp"
89 #include "prims/jvmtiTagMap.hpp"
90 #include "runtime/atomic.hpp"
91 #include "runtime/globals.hpp"
92 #include "runtime/interfaceSupport.inline.hpp"
93 #include "runtime/java.hpp"
94 #include "runtime/orderAccess.hpp"
95 #include "runtime/safepointMechanism.hpp"
96 #include "runtime/stackWatermarkSet.hpp"
97 #include "runtime/threads.hpp"
98 #include "runtime/vmThread.hpp"
99 #include "utilities/events.hpp"
100 #include "utilities/globalDefinitions.hpp"
101 #include "utilities/powerOfTwo.hpp"
102 #if INCLUDE_JVMCI
103 #include "jvmci/jvmci.hpp"
104 #endif
105 #if INCLUDE_JFR
106 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
107 #endif
108
109 class ShenandoahPretouchHeapTask : public WorkerTask {
110 private:
111 ShenandoahRegionIterator _regions;
112 const size_t _page_size;
113 public:
114 ShenandoahPretouchHeapTask(size_t page_size) :
115 WorkerTask("Shenandoah Pretouch Heap"),
116 _page_size(page_size) {}
117
118 virtual void work(uint worker_id) {
119 ShenandoahHeapRegion* r = _regions.next();
120 while (r != nullptr) {
121 if (r->is_committed()) {
122 os::pretouch_memory(r->bottom(), r->end(), _page_size);
123 }
124 r = _regions.next();
125 }
126 }
127 };
128
129 class ShenandoahPretouchBitmapTask : public WorkerTask {
130 private:
131 ShenandoahRegionIterator _regions;
132 char* _bitmap_base;
133 const size_t _bitmap_size;
134 const size_t _page_size;
135 public:
136 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
137 WorkerTask("Shenandoah Pretouch Bitmap"),
138 _bitmap_base(bitmap_base),
139 _bitmap_size(bitmap_size),
140 _page_size(page_size) {}
141
142 virtual void work(uint worker_id) {
143 ShenandoahHeapRegion* r = _regions.next();
144 while (r != nullptr) {
145 size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
146 size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: %zu <= %zu", end, _bitmap_size);
148
149 if (r->is_committed()) {
150 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
151 }
152
153 r = _regions.next();
154 }
155 }
156 };
157
158 static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
  // When a preferred page size is given, we do not want to mix large
  // and normal pages. If the size is not a multiple of the page size,
  // it will be aligned up to achieve this.
162 size_t alignment = os::vm_allocation_granularity();
163 if (preferred_page_size != os::vm_page_size()) {
164 alignment = MAX2(preferred_page_size, alignment);
165 size = align_up(size, alignment);
166 }
167
168 const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size, mtGC);
169 if (!reserved.is_reserved()) {
170 vm_exit_during_initialization("Could not reserve space");
171 }
172 return reserved;
173 }
174
175 jint ShenandoahHeap::initialize() {
176 //
177 // Figure out heap sizing
178 //
179
180 size_t init_byte_size = InitialHeapSize;
181 size_t min_byte_size = MinHeapSize;
182 size_t max_byte_size = MaxHeapSize;
183 size_t heap_alignment = HeapAlignment;
184
185 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
186
187 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
188 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
189
190 _num_regions = ShenandoahHeapRegion::region_count();
191 assert(_num_regions == (max_byte_size / reg_size_bytes),
192 "Regions should cover entire heap exactly: %zu != %zu/%zu",
193 _num_regions, max_byte_size, reg_size_bytes);
194
195 size_t num_committed_regions = init_byte_size / reg_size_bytes;
196 num_committed_regions = MIN2(num_committed_regions, _num_regions);
197 assert(num_committed_regions <= _num_regions, "sanity");
198 _initial_size = num_committed_regions * reg_size_bytes;
199
200 size_t num_min_regions = min_byte_size / reg_size_bytes;
201 num_min_regions = MIN2(num_min_regions, _num_regions);
202 assert(num_min_regions <= _num_regions, "sanity");
203 _minimum_size = num_min_regions * reg_size_bytes;
204
205 _soft_max_size = SoftMaxHeapSize;
206
207 _committed = _initial_size;
208
209 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
210 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
211 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
212
213 //
214 // Reserve and commit memory for heap
215 //
216
217 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
218 initialize_reserved_region(heap_rs);
219 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
220 _heap_region_special = heap_rs.special();
221
222 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
223 "Misaligned heap: " PTR_FORMAT, p2i(base()));
224 os::trace_page_sizes_for_requested_size("Heap",
225 max_byte_size, heap_alignment,
226 heap_rs.base(),
227 heap_rs.size(), heap_rs.page_size());
228
229 #if SHENANDOAH_OPTIMIZED_MARKTASK
230 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
231 // Fail if we ever attempt to address more than we can.
232 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
233 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
234 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
235 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
236 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
237 vm_exit_during_initialization("Fatal Error", buf);
238 }
239 #endif
240
241 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
242 if (!_heap_region_special) {
243 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
244 "Cannot commit heap memory");
245 }
246
247 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
248
  // Now that we know the number of regions and the heap sizes, initialize the heuristics.
250 initialize_heuristics();
251
252 assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
253
254 //
255 // Worker threads must be initialized after the barrier is configured
256 //
257 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
258 if (_workers == nullptr) {
259 vm_exit_during_initialization("Failed necessary allocation.");
260 } else {
261 _workers->initialize_workers();
262 }
263
264 if (ParallelGCThreads > 1) {
265 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
266 _safepoint_workers->initialize_workers();
267 }
268
269 //
270 // Reserve and commit memory for bitmap(s)
271 //
272
273 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
274 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
275
276 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
277
278 guarantee(bitmap_bytes_per_region != 0,
279 "Bitmap bytes per region should not be zero");
280 guarantee(is_power_of_2(bitmap_bytes_per_region),
281 "Bitmap bytes per region should be power of two: %zu", bitmap_bytes_per_region);
282
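  // A bitmap "slice" is the granularity at which bitmap memory is committed and uncommitted
  // along with its heap regions: if a bitmap page covers several regions, the slice spans all
  // of them; otherwise each region's portion of the bitmap is its own slice.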
283 if (bitmap_page_size > bitmap_bytes_per_region) {
284 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
285 _bitmap_bytes_per_slice = bitmap_page_size;
286 } else {
287 _bitmap_regions_per_slice = 1;
288 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
289 }
290
291 guarantee(_bitmap_regions_per_slice >= 1,
292 "Should have at least one region per slice: %zu",
293 _bitmap_regions_per_slice);
294
295 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
296 "Bitmap slices should be page-granular: bps = %zu, page size = %zu",
297 _bitmap_bytes_per_slice, bitmap_page_size);
298
299 ReservedSpace bitmap = reserve(_bitmap_size, bitmap_page_size);
300 os::trace_page_sizes_for_requested_size("Mark Bitmap",
301 bitmap_size_orig, bitmap_page_size,
302 bitmap.base(),
303 bitmap.size(), bitmap.page_size());
304 MemTracker::record_virtual_memory_tag(bitmap, mtGC);
305 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
306 _bitmap_region_special = bitmap.special();
307
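  // Commit enough of the bitmap to cover the initially committed regions, rounded up to whole slices.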
308 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
309 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
310 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
311 if (!_bitmap_region_special) {
312 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
313 "Cannot commit bitmap memory");
314 }
315
316 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
317
318 if (ShenandoahVerify) {
319 ReservedSpace verify_bitmap = reserve(_bitmap_size, bitmap_page_size);
320 os::trace_page_sizes_for_requested_size("Verify Bitmap",
321 bitmap_size_orig, bitmap_page_size,
322 verify_bitmap.base(),
323 verify_bitmap.size(), verify_bitmap.page_size());
324 if (!verify_bitmap.special()) {
325 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
326 "Cannot commit verification bitmap memory");
327 }
328 MemTracker::record_virtual_memory_tag(verify_bitmap, mtGC);
329 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
330 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
331 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
332 }
333
334 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
335 size_t aux_bitmap_page_size = bitmap_page_size;
336
337 ReservedSpace aux_bitmap = reserve(_bitmap_size, aux_bitmap_page_size);
338 os::trace_page_sizes_for_requested_size("Aux Bitmap",
339 bitmap_size_orig, aux_bitmap_page_size,
340 aux_bitmap.base(),
341 aux_bitmap.size(), aux_bitmap.page_size());
342 MemTracker::record_virtual_memory_tag(aux_bitmap, mtGC);
343 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
344 _aux_bitmap_region_special = aux_bitmap.special();
345 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
346
347 //
348 // Create regions and region sets
349 //
350 size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
351 size_t region_storage_size_orig = region_align * _num_regions;
352 size_t region_storage_size = align_up(region_storage_size_orig,
353 MAX2(region_page_size, os::vm_allocation_granularity()));
354
355 ReservedSpace region_storage = reserve(region_storage_size, region_page_size);
356 os::trace_page_sizes_for_requested_size("Region Storage",
357 region_storage_size_orig, region_page_size,
358 region_storage.base(),
359 region_storage.size(), region_storage.page_size());
360 MemTracker::record_virtual_memory_tag(region_storage, mtGC);
361 if (!region_storage.special()) {
362 os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
363 "Cannot commit region memory");
364 }
365
366 // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
367 // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address is available.
369 {
370 const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
371 const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
372 const size_t cset_page_size = os::vm_page_size();
373
374 uintptr_t min = round_up_power_of_2(cset_align);
375 uintptr_t max = (1u << 30u);
376 ReservedSpace cset_rs;
377
378 for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
379 char* req_addr = (char*)addr;
380 assert(is_aligned(req_addr, cset_align), "Should be aligned");
381 cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size, mtGC);
382 if (cset_rs.is_reserved()) {
383 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
384 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
385 break;
386 }
387 }
388
389 if (_collection_set == nullptr) {
390 cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size(), mtGC);
391 if (!cset_rs.is_reserved()) {
392 vm_exit_during_initialization("Cannot reserve memory for collection set");
393 }
394
395 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
396 }
397 os::trace_page_sizes_for_requested_size("Collection Set",
398 cset_size, cset_page_size,
399 cset_rs.base(),
400 cset_rs.size(), cset_rs.page_size());
401 }
402
403 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
404 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
405
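  // Carve the reserved heap into regions: place each ShenandoahHeapRegion object into the
  // dedicated region storage, mark its affiliation as FREE, and build the initial free set.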
406 {
407 ShenandoahHeapLocker locker(lock());
408 _free_set = new ShenandoahFreeSet(this, _num_regions);
409 for (size_t i = 0; i < _num_regions; i++) {
410 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
411 bool is_committed = i < num_committed_regions;
412 void* loc = region_storage.base() + i * region_align;
413
414 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
415 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
416
417 _marking_context->initialize_top_at_mark_start(r);
418 _regions[i] = r;
419 assert(!collection_set()->is_in(i), "New region should not be in collection set");
420
421 _affiliations[i] = ShenandoahAffiliation::FREE;
422 }
423
424 size_t young_cset_regions, old_cset_regions;
425
    // We are initializing the free set, so we ignore the cset region tallies.
427 size_t first_old, last_old, num_old;
428 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
429 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
430 }
431
432 if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
436 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
437
438 _pretouch_heap_page_size = heap_page_size;
439 _pretouch_bitmap_page_size = bitmap_page_size;
440
    // OS memory managers may want to coalesce back-to-back pages. Make their job
    // simpler by pre-touching the contiguous spaces (heap and bitmap) separately.
443
444 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
445 _workers->run_task(&bcl);
446
447 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
448 _workers->run_task(&hcl);
449 }
450
451 //
452 // Initialize the rest of GC subsystems
453 //
454
455 _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
456 for (uint worker = 0; worker < _max_workers; worker++) {
457 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
458 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
459 }
460
461 // There should probably be Shenandoah-specific options for these,
462 // just as there are G1-specific options.
463 {
464 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
465 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
466 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
467 }
468
469 _monitoring_support = new ShenandoahMonitoringSupport(this);
470 _phase_timings = new ShenandoahPhaseTimings(max_workers());
471 ShenandoahCodeRoots::initialize();
472
473 if (ShenandoahPacing) {
474 _pacer = new ShenandoahPacer(this);
475 _pacer->setup_for_idle();
476 }
477
478 initialize_controller();
479
480 if (ShenandoahUncommit) {
481 _uncommit_thread = new ShenandoahUncommitThread(this);
482 }
483
484 print_init_logger();
485
486 FullGCForwarding::initialize(_heap_region);
487
488 return JNI_OK;
489 }
490
491 void ShenandoahHeap::initialize_controller() {
492 _control_thread = new ShenandoahControlThread();
493 }
494
495 void ShenandoahHeap::print_init_logger() const {
496 ShenandoahInitLogger::print();
497 }
498
499 void ShenandoahHeap::initialize_mode() {
500 if (ShenandoahGCMode != nullptr) {
501 if (strcmp(ShenandoahGCMode, "satb") == 0) {
502 _gc_mode = new ShenandoahSATBMode();
503 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
504 _gc_mode = new ShenandoahPassiveMode();
505 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
506 _gc_mode = new ShenandoahGenerationalMode();
507 } else {
508 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
509 }
510 } else {
511 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
512 }
513 _gc_mode->initialize_flags();
514 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
515 vm_exit_during_initialization(
516 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
517 _gc_mode->name()));
518 }
519 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
520 vm_exit_during_initialization(
521 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
522 _gc_mode->name()));
523 }
524 }
525
526 void ShenandoahHeap::initialize_heuristics() {
527 _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity());
528 _global_generation->initialize_heuristics(mode());
529 }
530
531 #ifdef _MSC_VER
532 #pragma warning( push )
533 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
534 #endif
535
536 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
537 CollectedHeap(),
538 _gc_generation(nullptr),
539 _active_generation(nullptr),
540 _initial_size(0),
541 _committed(0),
542 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
543 _workers(nullptr),
544 _safepoint_workers(nullptr),
545 _heap_region_special(false),
546 _num_regions(0),
547 _regions(nullptr),
548 _affiliations(nullptr),
549 _gc_state_changed(false),
550 _gc_no_progress_count(0),
551 _cancel_requested_time(0),
552 _update_refs_iterator(this),
553 _global_generation(nullptr),
554 _control_thread(nullptr),
555 _uncommit_thread(nullptr),
556 _young_generation(nullptr),
557 _old_generation(nullptr),
558 _shenandoah_policy(policy),
559 _gc_mode(nullptr),
560 _free_set(nullptr),
561 _pacer(nullptr),
562 _verifier(nullptr),
563 _phase_timings(nullptr),
564 _monitoring_support(nullptr),
565 _memory_pool(nullptr),
566 _stw_memory_manager("Shenandoah Pauses"),
567 _cycle_memory_manager("Shenandoah Cycles"),
568 _gc_timer(new ConcurrentGCTimer()),
569 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
570 _marking_context(nullptr),
571 _bitmap_size(0),
572 _bitmap_regions_per_slice(0),
573 _bitmap_bytes_per_slice(0),
574 _bitmap_region_special(false),
575 _aux_bitmap_region_special(false),
576 _liveness_cache(nullptr),
577 _collection_set(nullptr)
578 {
  // Initialize GC mode early; many subsequent initialization procedures depend on it
580 initialize_mode();
581 _cancelled_gc.set(GCCause::_no_gc);
582 }
583
584 #ifdef _MSC_VER
585 #pragma warning( pop )
586 #endif
587
588 void ShenandoahHeap::print_heap_on(outputStream* st) const {
589 st->print_cr("Shenandoah Heap");
590 st->print_cr(" %zu%s max, %zu%s soft max, %zu%s committed, %zu%s used",
591 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
592 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
593 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
594 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
595 st->print_cr(" %zu x %zu %s regions",
596 num_regions(),
597 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
598 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
599
600 st->print("Status: ");
601 if (has_forwarded_objects()) st->print("has forwarded objects, ");
602 if (!mode()->is_generational()) {
603 if (is_concurrent_mark_in_progress()) st->print("marking,");
604 } else {
605 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
606 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
607 }
608 if (is_evacuation_in_progress()) st->print("evacuating, ");
609 if (is_update_refs_in_progress()) st->print("updating refs, ");
610 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
611 if (is_full_gc_in_progress()) st->print("full gc, ");
612 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
613 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
614 if (is_concurrent_strong_root_in_progress() &&
615 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
616
617 if (cancelled_gc()) {
618 st->print("cancelled");
619 } else {
620 st->print("not cancelled");
621 }
622 st->cr();
623
624 st->print_cr("Reserved region:");
625 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
626 p2i(reserved_region().start()),
627 p2i(reserved_region().end()));
628
629 ShenandoahCollectionSet* cset = collection_set();
630 st->print_cr("Collection set:");
631 if (cset != nullptr) {
632 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
633 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
634 } else {
635 st->print_cr(" (null)");
636 }
637
638 st->cr();
639
640 if (Verbose) {
641 st->cr();
642 print_heap_regions_on(st);
643 }
644 }
645
646 void ShenandoahHeap::print_gc_on(outputStream* st) const {
647 print_heap_regions_on(st);
648 }
649
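// Initializes the thread-local GCLAB for a GC worker or safepoint worker thread.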
650 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
651 public:
652 void do_thread(Thread* thread) {
653 assert(thread != nullptr, "Sanity");
654 ShenandoahThreadLocalData::initialize_gclab(thread);
655 }
656 };
657
658 void ShenandoahHeap::post_initialize() {
659 CollectedHeap::post_initialize();
660
661 // Schedule periodic task to report on gc thread CPU utilization
662 _mmu_tracker.initialize();
663
664 MutexLocker ml(Threads_lock);
665
666 ShenandoahInitWorkerGCLABClosure init_gclabs;
667 _workers->threads_do(&init_gclabs);
668
  // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
  // From now on, we let WorkerThreads initialize the gclab when a new worker is created.
671 _workers->set_initialize_gclab();
672
673 // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
674 // during a concurrent evacuation phase.
675 if (_safepoint_workers != nullptr) {
676 _safepoint_workers->threads_do(&init_gclabs);
677 _safepoint_workers->set_initialize_gclab();
678 }
679
680 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
681 }
682
683 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
684 return _global_generation->heuristics();
685 }
686
687 size_t ShenandoahHeap::used() const {
688 return global_generation()->used();
689 }
690
691 size_t ShenandoahHeap::committed() const {
692 return Atomic::load(&_committed);
693 }
694
695 void ShenandoahHeap::increase_committed(size_t bytes) {
696 shenandoah_assert_heaplocked_or_safepoint();
697 _committed += bytes;
698 }
699
700 void ShenandoahHeap::decrease_committed(size_t bytes) {
701 shenandoah_assert_heaplocked_or_safepoint();
702 _committed -= bytes;
703 }
704
705 // For tracking usage based on allocations, it should be the case that:
706 // * The sum of regions::used == heap::used
707 // * The sum of a generation's regions::used == generation::used
708 // * The sum of a generation's humongous regions::free == generation::humongous_waste
709 // These invariants are checked by the verifier on GC safepoints.
710 //
711 // Additional notes:
712 // * When a mutator's allocation request causes a region to be retired, the
713 // free memory left in that region is considered waste. It does not contribute
714 // to the usage, but it _does_ contribute to allocation rate.
715 // * The bottom of a PLAB must be aligned on card size. In some cases this will
716 // require padding in front of the PLAB (a filler object). Because this padding
717 // is included in the region's used memory we include the padding in the usage
718 // accounting as waste.
719 // * Mutator allocations are used to compute an allocation rate. They are also
720 // sent to the Pacer for those purposes.
721 // * There are three sources of waste:
722 // 1. The padding used to align a PLAB on card size
//  2. A region whose remaining free memory is less than the minimum TLAB size is retired
724 // 3. The unused portion of memory in the last region of a humongous object
725 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
726 size_t actual_bytes = req.actual_size() * HeapWordSize;
727 size_t wasted_bytes = req.waste() * HeapWordSize;
728 ShenandoahGeneration* generation = generation_for(req.affiliation());
729
730 if (req.is_gc_alloc()) {
731 assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
732 increase_used(generation, actual_bytes + wasted_bytes);
733 } else {
734 assert(req.is_mutator_alloc(), "Expected mutator alloc here");
735 // padding and actual size both count towards allocation counter
736 generation->increase_allocated(actual_bytes + wasted_bytes);
737
738 // only actual size counts toward usage for mutator allocations
739 increase_used(generation, actual_bytes);
740
741 // notify pacer of both actual size and waste
742 notify_mutator_alloc_words(req.actual_size(), req.waste());
743
744 if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
      increase_humongous_waste(generation, wasted_bytes);
746 }
747 }
748 }
749
750 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
751 generation->increase_humongous_waste(bytes);
752 if (!generation->is_global()) {
753 global_generation()->increase_humongous_waste(bytes);
754 }
755 }
756
757 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
758 generation->decrease_humongous_waste(bytes);
759 if (!generation->is_global()) {
760 global_generation()->decrease_humongous_waste(bytes);
761 }
762 }
763
764 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
765 generation->increase_used(bytes);
766 if (!generation->is_global()) {
767 global_generation()->increase_used(bytes);
768 }
769 }
770
771 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
772 generation->decrease_used(bytes);
773 if (!generation->is_global()) {
774 global_generation()->decrease_used(bytes);
775 }
776 }
777
778 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
779 if (ShenandoahPacing) {
780 control_thread()->pacing_notify_alloc(words);
781 if (waste > 0) {
782 pacer()->claim_for_alloc<true>(waste);
783 }
784 }
785 }
786
787 size_t ShenandoahHeap::capacity() const {
788 return committed();
789 }
790
791 size_t ShenandoahHeap::max_capacity() const {
792 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
793 }
794
795 size_t ShenandoahHeap::soft_max_capacity() const {
796 size_t v = Atomic::load(&_soft_max_size);
797 assert(min_capacity() <= v && v <= max_capacity(),
798 "Should be in bounds: %zu <= %zu <= %zu",
799 min_capacity(), v, max_capacity());
800 return v;
801 }
802
803 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
804 assert(min_capacity() <= v && v <= max_capacity(),
805 "Should be in bounds: %zu <= %zu <= %zu",
806 min_capacity(), v, max_capacity());
807 Atomic::store(&_soft_max_size, v);
808 }
809
810 size_t ShenandoahHeap::min_capacity() const {
811 return _minimum_size;
812 }
813
814 size_t ShenandoahHeap::initial_capacity() const {
815 return _initial_size;
816 }
817
818 bool ShenandoahHeap::is_in(const void* p) const {
819 if (!is_in_reserved(p)) {
820 return false;
821 }
822
823 if (is_full_gc_move_in_progress()) {
    // A Full GC move is running, so we do not have consistent region
    // information yet. But we know the pointer is in the heap.
826 return true;
827 }
828
  // Now check if we point to a live section in an active region.
830 const ShenandoahHeapRegion* r = heap_region_containing(p);
831 if (p >= r->top()) {
832 return false;
833 }
834
835 if (r->is_active()) {
836 return true;
837 }
838
839 // The region is trash, but won't be recycled until after concurrent weak
840 // roots. We also don't allow mutators to allocate from trash regions
841 // during weak roots. Concurrent class unloading may access unmarked oops
842 // in trash regions.
843 return r->is_trash() && is_concurrent_weak_root_in_progress();
844 }
845
846 void ShenandoahHeap::notify_soft_max_changed() {
847 if (_uncommit_thread != nullptr) {
848 _uncommit_thread->notify_soft_max_changed();
849 }
850 }
851
852 void ShenandoahHeap::notify_explicit_gc_requested() {
853 if (_uncommit_thread != nullptr) {
854 _uncommit_thread->notify_explicit_gc_requested();
855 }
856 }
857
858 bool ShenandoahHeap::check_soft_max_changed() {
859 size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
860 size_t old_soft_max = soft_max_capacity();
861 if (new_soft_max != old_soft_max) {
862 new_soft_max = MAX2(min_capacity(), new_soft_max);
863 new_soft_max = MIN2(max_capacity(), new_soft_max);
864 if (new_soft_max != old_soft_max) {
865 log_info(gc)("Soft Max Heap Size: %zu%s -> %zu%s",
866 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
867 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
868 );
869 set_soft_max_capacity(new_soft_max);
870 return true;
871 }
872 }
873 return false;
874 }
875
876 void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
879 monitoring_support()->notify_heap_changed();
880 _heap_changed.try_set();
881 }
882
883 void ShenandoahHeap::set_forced_counters_update(bool value) {
884 monitoring_support()->set_forced_counters_update(value);
885 }
886
887 void ShenandoahHeap::handle_force_counters_update() {
888 monitoring_support()->handle_force_counters_update();
889 }
890
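// GCLAB refill slow path: grow the thread's GCLAB size heuristic, retire the current GCLAB and
// allocate a fresh one, or return nullptr so the caller falls back to a shared allocation if even
// the new GCLAB could not fit the object.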
891 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
892 // New object should fit the GCLAB size
893 size_t min_size = MAX2(size, PLAB::min_size());
894
895 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
896 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
897
898 new_size = MIN2(new_size, PLAB::max_size());
899 new_size = MAX2(new_size, PLAB::min_size());
900
  // Record the new heuristic value even if we end up taking a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // heuristics should catch up with them.
904 log_debug(gc, free)("Set new GCLAB size: %zu", new_size);
905 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
906
907 if (new_size < size) {
    // The new size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs when we encounter a large object.
910 log_debug(gc, free)("New gclab size (%zu) is too small for %zu", new_size, size);
911 return nullptr;
912 }
913
914 // Retire current GCLAB, and allocate a new one.
915 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
916 gclab->retire();
917
918 size_t actual_size = 0;
919 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
920 if (gclab_buf == nullptr) {
921 return nullptr;
922 }
923
924 assert (size <= actual_size, "allocation should fit");
925
  // ...and clear or zap the just-allocated GCLAB, if needed.
927 if (ZeroTLAB) {
928 Copy::zero_to_words(gclab_buf, actual_size);
929 } else if (ZapTLAB) {
930 // Skip mangling the space corresponding to the object header to
931 // ensure that the returned space is not considered parsable by
932 // any concurrent GC thread.
933 size_t hdr_size = oopDesc::header_size();
934 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
935 }
936 gclab->set_buf(gclab_buf, actual_size);
937 return gclab->allocate(size);
938 }
939
940 // Called from stubs in JIT code or interpreter
941 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
942 size_t requested_size,
943 size_t* actual_size) {
944 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
945 HeapWord* res = allocate_memory(req);
946 if (res != nullptr) {
947 *actual_size = req.actual_size();
948 } else {
949 *actual_size = 0;
950 }
951 return res;
952 }
953
954 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
955 size_t word_size,
956 size_t* actual_size) {
957 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
958 HeapWord* res = allocate_memory(req);
959 if (res != nullptr) {
960 *actual_size = req.actual_size();
961 } else {
962 *actual_size = 0;
963 }
964 return res;
965 }
966
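// Central allocation path for both mutator and GC allocations. Mutator allocations are paced
// (if ShenandoahPacing), may block waiting for the collector on failure, and are retried until
// a Full GC has completed; GC allocations never block here.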
967 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
968 intptr_t pacer_epoch = 0;
969 bool in_new_region = false;
970 HeapWord* result = nullptr;
971
972 if (req.is_mutator_alloc()) {
973 if (ShenandoahPacing) {
974 pacer()->pace_for_alloc(req.size());
975 pacer_epoch = pacer()->epoch();
976 }
977
978 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
979 result = allocate_memory_under_lock(req, in_new_region);
980 }
981
982 // Check that gc overhead is not exceeded.
983 //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-TLAB) allocations. This check
    // tests whether the GC overhead limit has been exceeded.
    // It will notify the collector to start a cycle, but will raise
    // an OOME to the mutator if the last Full GCs have not made progress.
    // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
990 if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
991 control_thread()->handle_alloc_failure(req, false);
992 req.set_actual_size(0);
993 return nullptr;
994 }
995
996 if (result == nullptr) {
997 // Block until control thread reacted, then retry allocation.
998 //
999 // It might happen that one of the threads requesting allocation would unblock
1000 // way later after GC happened, only to fail the second allocation, because
1001 // other threads have already depleted the free storage. In this case, a better
1002 // strategy is to try again, until at least one full GC has completed.
1003 //
      // Stop retrying and return nullptr, raising an OutOfMemoryError, if our allocation failed even after:
1005 // a) We experienced a GC that had good progress, or
1006 // b) We experienced at least one Full GC (whether or not it had good progress)
1007
1008 const size_t original_count = shenandoah_policy()->full_gc_count();
1009 while (result == nullptr && should_retry_allocation(original_count)) {
1010 control_thread()->handle_alloc_failure(req, true);
1011 result = allocate_memory_under_lock(req, in_new_region);
1012 }
1013 if (result != nullptr) {
1014 // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
1015 notify_gc_progress();
1016 }
1017 if (log_develop_is_enabled(Debug, gc, alloc)) {
1018 ResourceMark rm;
1019 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: %zu"
1020 ", Original: %zu, Latest: %zu",
1021 Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1022 original_count, get_gc_no_progress_count());
1023 }
1024 }
1025 } else {
1026 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1027 result = allocate_memory_under_lock(req, in_new_region);
1028 // Do not call handle_alloc_failure() here, because we cannot block.
1029 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1030 }
1031
1032 if (in_new_region) {
1033 notify_heap_changed();
1034 }
1035
1036 if (result == nullptr) {
1037 req.set_actual_size(0);
1038 }
1039
1040 // This is called regardless of the outcome of the allocation to account
1041 // for any waste created by retiring regions with this request.
1042 increase_used(req);
1043
1044 if (result != nullptr) {
1045 size_t requested = req.size();
1046 size_t actual = req.actual_size();
1047
1048 assert (req.is_lab_alloc() || (requested == actual),
1049 "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
1050 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1051
1052 if (req.is_mutator_alloc()) {
1053 // If we requested more than we were granted, give the rest back to pacer.
1054 // This only matters if we are in the same pacing epoch: do not try to unpace
1055 // over the budget for the other phase.
1056 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1057 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1058 }
1059 }
1060 }
1061
1062 return result;
1063 }
1064
1065 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
1066 return shenandoah_policy()->full_gc_count() == original_full_gc_count
1067 && !shenandoah_policy()->is_at_shutdown();
1068 }
1069
1070 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1071 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1072 // We cannot block for safepoint for GC allocations, because there is a high chance
1073 // we are already running at safepoint or from stack watermark machinery, and we cannot
1074 // block again.
1075 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1076
1077 // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1078 if (req.is_old() && !old_generation()->can_allocate(req)) {
1079 return nullptr;
1080 }
1081
  // If the TLAB request size is greater than the available memory, allocate() will attempt to downsize
  // the request to fit within the available memory.
1084 HeapWord* result = _free_set->allocate(req, in_new_region);
1085
1086 // Record the plab configuration for this result and register the object.
1087 if (result != nullptr && req.is_old()) {
1088 old_generation()->configure_plab_for_current_thread(req);
1089 if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1090 // Register the newly allocated object while we're holding the global lock since there's no synchronization
1091 // built in to the implementation of register_object(). There are potential races when multiple independent
1092 // threads are allocating objects, some of which might span the same card region. For example, consider
1093 // a card table's memory region within which three objects are being allocated by three different threads:
1094 //
1095 // objects being "concurrently" allocated:
1096 // [-----a------][-----b-----][--------------c------------------]
1097 // [---- card table memory range --------------]
1098 //
1099 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1100 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1101 // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1102 // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1103 // card region.
1104 //
1105 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1106 // last-start representing object b while first-start represents object c. This is why we need to require all
1107 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1108 old_generation()->card_scan()->register_object(result);
1109 }
1110 }
1111
1112 return result;
1113 }
1114
1115 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1116 bool* gc_overhead_limit_was_exceeded) {
1117 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1118 return allocate_memory(req);
1119 }
1120
1121 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1122 size_t size,
1123 Metaspace::MetadataType mdtype) {
1124 MetaWord* result;
1125
1126 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1127 ShenandoahHeuristics* h = global_generation()->heuristics();
1128 if (h->can_unload_classes()) {
1129 h->record_metaspace_oom();
1130 }
1131
1132 // Expand and retry allocation
1133 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1134 if (result != nullptr) {
1135 return result;
1136 }
1137
1138 // Start full GC
1139 collect(GCCause::_metadata_GC_clear_soft_refs);
1140
1141 // Retry allocation
1142 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1143 if (result != nullptr) {
1144 return result;
1145 }
1146
1147 // Expand and retry allocation
1148 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1149 if (result != nullptr) {
1150 return result;
1151 }
1152
1153 // Out of memory
1154 return nullptr;
1155 }
1156
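// Evacuates every live object in a collection set region that has not already been forwarded.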
1157 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1158 private:
1159 ShenandoahHeap* const _heap;
1160 Thread* const _thread;
1161 public:
1162 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1163 _heap(heap), _thread(Thread::current()) {}
1164
1165 void do_object(oop p) {
1166 shenandoah_assert_marked(nullptr, p);
1167 if (!p->is_forwarded()) {
1168 _heap->evacuate_object(p, _thread);
1169 }
1170 }
1171 };
1172
1173 class ShenandoahEvacuationTask : public WorkerTask {
1174 private:
1175 ShenandoahHeap* const _sh;
1176 ShenandoahCollectionSet* const _cs;
1177 bool _concurrent;
1178 public:
1179 ShenandoahEvacuationTask(ShenandoahHeap* sh,
1180 ShenandoahCollectionSet* cs,
1181 bool concurrent) :
1182 WorkerTask("Shenandoah Evacuation"),
1183 _sh(sh),
1184 _cs(cs),
1185 _concurrent(concurrent)
1186 {}
1187
1188 void work(uint worker_id) {
1189 if (_concurrent) {
1190 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1191 ShenandoahSuspendibleThreadSetJoiner stsj;
1192 ShenandoahEvacOOMScope oom_evac_scope;
1193 do_work();
1194 } else {
1195 ShenandoahParallelWorkerSession worker_session(worker_id);
1196 ShenandoahEvacOOMScope oom_evac_scope;
1197 do_work();
1198 }
1199 }
1200
1201 private:
1202 void do_work() {
1203 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1204 ShenandoahHeapRegion* r;
1205 while ((r =_cs->claim_next()) != nullptr) {
1206 assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
1207 _sh->marked_object_iterate(r, &cl);
1208
1209 if (ShenandoahPacing) {
1210 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1211 }
1212
1213 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1214 break;
1215 }
1216 }
1217 }
1218 };
1219
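// Retires the thread's GCLAB (and its PLAB in generational mode); optionally resets the recorded
// LAB sizes so they are re-sized on the next allocation.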
1220 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1221 private:
1222 bool const _resize;
1223 public:
1224 explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1225 void do_thread(Thread* thread) override {
1226 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1227 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1228 gclab->retire();
1229 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1230 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1231 }
1232
1233 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1234 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1235 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1236
1237 // There are two reasons to retire all plabs between old-gen evacuation passes.
1238 // 1. We need to make the plab memory parsable by remembered-set scanning.
1239 // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1240 ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1241 if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1242 ShenandoahThreadLocalData::set_plab_size(thread, 0);
1243 }
1244 }
1245 }
1246 };
1247
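// Publishes the current global gc state into a thread's thread-local copy.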
1248 class ShenandoahGCStatePropagatorHandshakeClosure : public HandshakeClosure {
1249 public:
1250 explicit ShenandoahGCStatePropagatorHandshakeClosure(char gc_state) :
1251 HandshakeClosure("Shenandoah GC State Change"),
1252 _gc_state(gc_state) {}
1253
1254 void do_thread(Thread* thread) override {
1255 ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1256 }
1257 private:
1258 char _gc_state;
1259 };
1260
1261 class ShenandoahPrepareForUpdateRefsHandshakeClosure : public HandshakeClosure {
1262 public:
1263 explicit ShenandoahPrepareForUpdateRefsHandshakeClosure(char gc_state) :
1264 HandshakeClosure("Shenandoah Prepare for Update Refs"),
1265 _retire(ResizeTLAB), _propagator(gc_state) {}
1266
1267 void do_thread(Thread* thread) override {
1268 _propagator.do_thread(thread);
1269 if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1270 _retire.do_thread(thread);
1271 }
1272 }
1273 private:
1274 ShenandoahRetireGCLABClosure _retire;
1275 ShenandoahGCStatePropagatorHandshakeClosure _propagator;
1276 };
1277
1278 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1279 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1280 workers()->run_task(&task);
1281 }
1282
1283 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1284 {
    // Java threads take this lock while they are being attached and added to the list of threads.
    // If another thread acquires this lock before we update the gc state, it may receive a stale
    // gc state, but it will have been added to the list of Java threads and so will be corrected
    // by the following handshake.
1289 MutexLocker lock(Threads_lock);
1290
1291 // A cancellation at this point means the degenerated cycle must resume from update-refs.
1292 set_gc_state_concurrent(EVACUATION, false);
1293 set_gc_state_concurrent(WEAK_ROOTS, false);
1294 set_gc_state_concurrent(UPDATE_REFS, true);
1295 }
1296
1297 // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1298 ShenandoahPrepareForUpdateRefsHandshakeClosure prepare_for_update_refs(_gc_state.raw_value());
1299
1300 // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1301 Threads::non_java_threads_do(&prepare_for_update_refs);
1302
1303 // Now retire gclabs and plabs and propagate gc_state for mutator threads
1304 Handshake::execute(&prepare_for_update_refs);
1305
1306 _update_refs_iterator.reset();
1307 }
1308
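// Runs two handshake closures on each thread under a single handshake operation.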
1309 class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
1310 HandshakeClosure* _handshake_1;
1311 HandshakeClosure* _handshake_2;
1312 public:
1313 ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
1314 HandshakeClosure(handshake_2->name()),
1315 _handshake_1(handshake_1), _handshake_2(handshake_2) {}
1316
1317 void do_thread(Thread* thread) override {
1318 _handshake_1->do_thread(thread);
1319 _handshake_2->do_thread(thread);
1320 }
1321 };
1322
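// Leaves the concurrent weak roots phase and propagates the updated gc state to all threads,
// optionally piggybacking an extra closure on the mutator handshake.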
1323 void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
1324 {
1325 assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
1326 MutexLocker lock(Threads_lock);
1327 set_gc_state_concurrent(WEAK_ROOTS, false);
1328 }
1329
1330 ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
1331 Threads::non_java_threads_do(&propagator);
1332 if (handshake_closure == nullptr) {
1333 Handshake::execute(&propagator);
1334 } else {
1335 ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
1336 Handshake::execute(&composite);
1337 }
1338 }
1339
1340 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1341 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1342 if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1343 // This thread went through the OOM during evac protocol. It is safe to return
1344 // the forward pointer. It must not attempt to evacuate any other objects.
1345 return ShenandoahBarrierSet::resolve_forwarded(p);
1346 }
1347
1348 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1349
1350 ShenandoahHeapRegion* r = heap_region_containing(p);
1351 assert(!r->is_humongous(), "never evacuate humongous objects");
1352
1353 ShenandoahAffiliation target_gen = r->affiliation();
1354 return try_evacuate_object(p, thread, r, target_gen);
1355 }
1356
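// Copies the object into the target generation (preferring the thread's GCLAB) and races to
// install the forwarding pointer; returns the winning copy, rolling back or filling our copy
// if another thread won the race.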
1357 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1358 ShenandoahAffiliation target_gen) {
1359 assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1360 assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1361 bool alloc_from_lab = true;
1362 HeapWord* copy = nullptr;
1363
1364 markWord mark = p->mark();
1365 if (ShenandoahForwarding::is_forwarded(mark)) {
1366 return ShenandoahForwarding::get_forwardee(p);
1367 }
1368 size_t old_size = ShenandoahForwarding::size(p);
1369 size_t size = p->copy_size(old_size, mark);
1370
1371 #ifdef ASSERT
1372 if (ShenandoahOOMDuringEvacALot &&
1373 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1374 copy = nullptr;
1375 } else {
1376 #endif
1377 if (UseTLAB) {
1378 copy = allocate_from_gclab(thread, size);
1379 }
1380 if (copy == nullptr) {
1381 // If we failed to allocate in LAB, we'll try a shared allocation.
1382 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1383 copy = allocate_memory(req);
1384 alloc_from_lab = false;
1385 }
1386 #ifdef ASSERT
1387 }
1388 #endif
1389
1390 if (copy == nullptr) {
1391 control_thread()->handle_alloc_failure_evac(size);
1392
1393 _oom_evac_handler.handle_out_of_memory_during_evacuation();
1394
1395 return ShenandoahBarrierSet::resolve_forwarded(p);
1396 }
1397
1398 // Copy the object:
1399 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
1400
1401 // Try to install the new forwarding pointer.
1402 oop copy_val = cast_to_oop(copy);
1403 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1404 if (result == copy_val) {
1405 // Successfully evacuated. Our copy is now the public one!
1406 copy_val->initialize_hash_if_necessary(p);
1407 ContinuationGCSupport::relativize_stack_chunk(copy_val);
1408 shenandoah_assert_correct(nullptr, copy_val);
1409 return copy_val;
1410 } else {
1411 // Failed to evacuate. We need to deal with the object that is left behind. Since this
1412 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1413 // But if it happens to contain references to evacuated regions, those references would
1414 // not get updated for this stale copy during this cycle, and we will crash while scanning
1415 // it the next cycle.
1416 if (alloc_from_lab) {
1417 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1418 // object will overwrite this stale copy, or the filler object on LAB retirement will
1419 // do this.
1420 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1421 } else {
1422 // For non-LAB allocations, we have no way to retract the allocation, and
1423 // have to explicitly overwrite the copy with the filler object. With that overwrite,
1424 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1425 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1426 fill_with_object(copy, size);
1427 shenandoah_assert_correct(nullptr, copy_val);
1428 // For non-LAB allocations, the object has already been registered
1429 }
1430 shenandoah_assert_correct(nullptr, result);
1431 return result;
1432 }
1433 }
1434
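// Turn all collection set regions into trash under the heap lock; the trashed regions are
// recycled into free space later (see recycle_trash()).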
1435 void ShenandoahHeap::trash_cset_regions() {
1436 ShenandoahHeapLocker locker(lock());
1437
1438 ShenandoahCollectionSet* set = collection_set();
1439 ShenandoahHeapRegion* r;
1440 set->clear_current_index();
1441 while ((r = set->next()) != nullptr) {
1442 r->make_trash();
1443 }
1444 collection_set()->clear();
1445 }
1446
1447 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1448 st->print_cr("Heap Regions:");
1449 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1450 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1451 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1452 st->print_cr("UWM=update watermark, U=used");
1453 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1454 st->print_cr("S=shared allocs, L=live data");
1455 st->print_cr("CP=critical pins");
1456
1457 for (size_t i = 0; i < num_regions(); i++) {
1458 get_region(i)->print_on(st);
1459 }
1460 }
1461
1462 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
1463 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1464 assert(!start->has_live(), "liveness must be zero");
1465
1466 // Do not try to get the size of this humongous object. STW collections will
1467 // have already unloaded classes, so an unmarked object may have a bad klass pointer.
1468 ShenandoahHeapRegion* region = start;
1469 size_t index = region->index();
1470 do {
1471 assert(region->is_humongous(), "Expect correct humongous start or continuation");
1472 assert(!region->is_cset(), "Humongous region should not be in collection set");
1473 region->make_trash_immediate();
1474 region = get_region(++index);
1475 } while (region != nullptr && region->is_humongous_continuation());
1476
1477 // Return number of regions trashed
1478 return index - start->index();
1479 }
1480
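// Verification closure: asserts that the thread's GCLAB (and PLAB, in generational mode)
// has been fully retired and needs no further retirement.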
1481 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1482 public:
1483 ShenandoahCheckCleanGCLABClosure() {}
1484 void do_thread(Thread* thread) {
1485 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1486 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1487 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1488
1489 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1490 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1491 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1492 assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1493 }
1494 }
1495 };
1496
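// Make all allocation buffers parsable for heap walks: make TLABs of Java threads parsable
// (fully retiring them when ZeroTLAB is set), and retire GCLABs of Java, GC worker and
// safepoint worker threads.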
1497 void ShenandoahHeap::labs_make_parsable() {
1498 assert(UseTLAB, "Only call with UseTLAB");
1499
1500 ShenandoahRetireGCLABClosure cl(false);
1501
1502 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1503 ThreadLocalAllocBuffer& tlab = t->tlab();
1504 tlab.make_parsable();
1505 if (ZeroTLAB) {
1506 t->retire_tlab();
1507 }
1508 cl.do_thread(t);
1509 }
1510
1511 workers()->threads_do(&cl);
1512
1513 if (safepoint_workers() != nullptr) {
1514 safepoint_workers()->threads_do(&cl);
1515 }
1516 }
1517
1518 void ShenandoahHeap::tlabs_retire(bool resize) {
1519 assert(UseTLAB, "Only call with UseTLAB");
1520 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1521
1522 ThreadLocalAllocStats stats;
1523
1524 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1525 t->retire_tlab(&stats);
1526 if (resize) {
1527 t->tlab().resize();
1528 }
1529 }
1530
1531 stats.publish();
1532
1533 #ifdef ASSERT
1534 ShenandoahCheckCleanGCLABClosure cl;
1535 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1536 cl.do_thread(t);
1537 }
1538 workers()->threads_do(&cl);
1539 #endif
1540 }
1541
1542 void ShenandoahHeap::gclabs_retire(bool resize) {
1543 assert(UseTLAB, "Only call with UseTLAB");
1544 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1545
1546 ShenandoahRetireGCLABClosure cl(resize);
1547 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1548 cl.do_thread(t);
1549 }
1550
1551 workers()->threads_do(&cl);
1552
1553 if (safepoint_workers() != nullptr) {
1554 safepoint_workers()->threads_do(&cl);
1555 }
1556 }
1557
1558 // Returns size in bytes
1559 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1560 // Return the max allowed size, and let the allocation path
1561 // figure out the safe size for current allocation.
1562 return ShenandoahHeapRegion::max_tlab_size_bytes();
1563 }
1564
1565 size_t ShenandoahHeap::max_tlab_size() const {
1566 // Returns size in words
1567 return ShenandoahHeapRegion::max_tlab_size_words();
1568 }
1569
1570 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1571 // These requests are ignored because we can't easily have Shenandoah jump into
1572 // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1573 // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1574 // on the VM thread, but this would confuse the control thread mightily and doesn't
1575 // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1576 // concurrent cycle in the prologue of the heap inspect/dump operation. This is how
1577 // other concurrent collectors in the JVM handle this scenario as well.
1578 assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1579 guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1580 }
1581
1582 void ShenandoahHeap::collect(GCCause::Cause cause) {
1583 control_thread()->request_gc(cause);
1584 }
1585
1586 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1587 //assert(false, "Shouldn't need to do full collections");
1588 }
1589
1590 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1591 ShenandoahHeapRegion* r = heap_region_containing(addr);
1592 if (r != nullptr) {
1593 return r->block_start(addr);
1594 }
1595 return nullptr;
1596 }
1597
1598 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1599 ShenandoahHeapRegion* r = heap_region_containing(addr);
1600 return r->block_is_obj(addr);
1601 }
1602
1603 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1604 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1605 }
1606
1607 void ShenandoahHeap::prepare_for_verify() {
1608 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1609 labs_make_parsable();
1610 }
1611 }
1612
1613 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1614 if (_shenandoah_policy->is_at_shutdown()) {
1615 return;
1616 }
1617
1618 if (_control_thread != nullptr) {
1619 tcl->do_thread(_control_thread);
1620 }
1621
1622 if (_uncommit_thread != nullptr) {
1623 tcl->do_thread(_uncommit_thread);
1624 }
1625
1626 workers()->threads_do(tcl);
1627 if (_safepoint_workers != nullptr) {
1628 _safepoint_workers->threads_do(tcl);
1629 }
1630 }
1631
1632 void ShenandoahHeap::print_tracing_info() const {
1633 LogTarget(Info, gc, stats) lt;
1634 if (lt.is_enabled()) {
1635 ResourceMark rm;
1636 LogStream ls(lt);
1637
1638 phase_timings()->print_global_on(&ls);
1639
1640 ls.cr();
1641 ls.cr();
1642
1643 shenandoah_policy()->print_gc_stats(&ls);
1644
1645 ls.cr();
1646 ls.cr();
1647 }
1648 }
1649
1650 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1651 shenandoah_assert_control_or_vm_thread_at_safepoint();
1652 _gc_generation = generation;
1653 }
1654
1655 // Active generation may only be set by the VM thread at a safepoint.
1656 void ShenandoahHeap::set_active_generation() {
1657 assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1658 assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1659 assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1660 _active_generation = _gc_generation;
1661 }
1662
1663 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1664 shenandoah_policy()->record_collection_cause(cause);
1665
1666 const GCCause::Cause current = gc_cause();
1667 assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1668 GCCause::to_string(current), GCCause::to_string(cause));
1669 assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1670
1671 set_gc_cause(cause);
1672 set_gc_generation(generation);
1673
1674 generation->heuristics()->record_cycle_start();
1675 }
1676
1677 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1678 assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1679 assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1680
1681 generation->heuristics()->record_cycle_end();
1682 if (mode()->is_generational() && generation->is_global()) {
1683 // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1684 young_generation()->heuristics()->record_cycle_end();
1685 old_generation()->heuristics()->record_cycle_end();
1686 }
1687
1688 set_gc_generation(nullptr);
1689 set_gc_cause(GCCause::_no_gc);
1690 }
1691
1692 void ShenandoahHeap::verify(VerifyOption vo) {
1693 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1694 if (ShenandoahVerify) {
1695 verifier()->verify_generic(vo);
1696 } else {
1697 // TODO: Consider allocating verification bitmaps on demand,
1698 // and turn this on unconditionally.
1699 }
1700 }
1701 }
1702 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1703 return _free_set->capacity();
1704 }
1705
1706 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1707 private:
1708 MarkBitMap* _bitmap;
1709 ShenandoahScanObjectStack* _oop_stack;
1710 ShenandoahHeap* const _heap;
1711 ShenandoahMarkingContext* const _marking_context;
1712
1713 template <class T>
1714 void do_oop_work(T* p) {
1715 T o = RawAccess<>::oop_load(p);
1716 if (!CompressedOops::is_null(o)) {
1717 oop obj = CompressedOops::decode_not_null(o);
1718 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1719 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1720 return;
1721 }
1722 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1723
1724 assert(oopDesc::is_oop(obj), "must be a valid oop");
1725 if (!_bitmap->is_marked(obj)) {
1726 _bitmap->mark(obj);
1727 _oop_stack->push(obj);
1728 }
1729 }
1730 }
1731 public:
1732 ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1733 _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1734 _marking_context(_heap->marking_context()) {}
1735 void do_oop(oop* p) { do_oop_work(p); }
1736 void do_oop(narrowOop* p) { do_oop_work(p); }
1737 };
1738
1739 /*
1740 * This is public API, used in preparation of object_iterate().
1741 * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1742 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1743 * control, we call SH::tlabs_retire, SH::gclabs_retire.
1744 */
1745 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1746 // No-op.
1747 }
1748
1749 /*
1750 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1751 *
1752 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1753 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1754 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1755 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1756 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1757 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1758 * wiped the bitmap in preparation for next marking).
1759 *
1760 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1761 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1762 * is allowed to report dead objects, but is not required to do so.
1763 */
1764 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1765 // Reset bitmap
1766 if (!prepare_aux_bitmap_for_iteration())
1767 return;
1768
1769 ShenandoahScanObjectStack oop_stack;
1770 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1771 // Seed the stack with root scan
1772 scan_roots_for_iteration(&oop_stack, &oops);
1773
1774 // Work through the oop stack to traverse heap
1775 while (! oop_stack.is_empty()) {
1776 oop obj = oop_stack.pop();
1777 assert(oopDesc::is_oop(obj), "must be a valid oop");
1778 cl->do_object(obj);
1779 obj->oop_iterate(&oops);
1780 }
1781
1782 assert(oop_stack.is_empty(), "should be empty");
1783 // Reclaim bitmap
1784 reclaim_aux_bitmap_for_iteration();
1785 }
1786
1787 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1788 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1789
1790 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1791 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1792 return false;
1793 }
1794 // Reset bitmap
1795 _aux_bit_map.clear();
1796 return true;
1797 }
1798
1799 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1800 // Process GC roots according to current GC cycle
1801 // This populates the work stack with initial objects
1802 // It is important to relinquish the associated locks before diving
1803 // into heap dumper
1804 uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1805 ShenandoahHeapIterationRootScanner rp(n_workers);
1806 rp.roots_do(oops);
1807 }
1808
1809 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1810 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1811 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1812 }
1813 }
1814
1815 // Closure for parallel object iteration
1816 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1817 private:
1818 MarkBitMap* _bitmap;
1819 ShenandoahObjToScanQueue* _queue;
1820 ShenandoahHeap* const _heap;
1821 ShenandoahMarkingContext* const _marking_context;
1822
1823 template <class T>
1824 void do_oop_work(T* p) {
1825 T o = RawAccess<>::oop_load(p);
1826 if (!CompressedOops::is_null(o)) {
1827 oop obj = CompressedOops::decode_not_null(o);
1828 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1829 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1830 return;
1831 }
1832 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1833
1834 assert(oopDesc::is_oop(obj), "Must be a valid oop");
1835 if (_bitmap->par_mark(obj)) {
1836 _queue->push(ShenandoahMarkTask(obj));
1837 }
1838 }
1839 }
1840 public:
1841 ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1842 _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1843 _marking_context(_heap->marking_context()) {}
1844 void do_oop(oop* p) { do_oop_work(p); }
1845 void do_oop(narrowOop* p) { do_oop_work(p); }
1846 };
1847
1848 // Object iterator for parallel heap iteration.
1849 // The root scanning phase happens in the constructor, as preparation of the
1850 // parallel marking queues.
1851 // Every worker processes its own marking queue; work stealing is used
1852 // to balance the workload.
1853 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1854 private:
1855 uint _num_workers;
1856 bool _init_ready;
1857 MarkBitMap* _aux_bit_map;
1858 ShenandoahHeap* _heap;
1859 ShenandoahScanObjectStack _roots_stack; // global roots stack
1860 ShenandoahObjToScanQueueSet* _task_queues;
1861 public:
1862 ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1863 _num_workers(num_workers),
1864 _init_ready(false),
1865 _aux_bit_map(bitmap),
1866 _heap(ShenandoahHeap::heap()) {
1867 // Initialize bitmap
1868 _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1869 if (!_init_ready) {
1870 return;
1871 }
1872
1873 ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1874 _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1875
1876 _init_ready = prepare_worker_queues();
1877 }
1878
1879 ~ShenandoahParallelObjectIterator() {
1880 // Reclaim bitmap
1881 _heap->reclaim_aux_bitmap_for_iteration();
1882 // Reclaim queue for workers
1883 if (_task_queues != nullptr) {
1884 for (uint i = 0; i < _num_workers; ++i) {
1885 ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1886 if (q != nullptr) {
1887 delete q;
1888 _task_queues->register_queue(i, nullptr);
1889 }
1890 }
1891 delete _task_queues;
1892 _task_queues = nullptr;
1893 }
1894 }
1895
1896 virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1897 if (_init_ready) {
1898 object_iterate_parallel(cl, worker_id, _task_queues);
1899 }
1900 }
1901
1902 private:
1903 // Divide the global _roots_stack into worker queues
1904 bool prepare_worker_queues() {
1905 _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1906 // Initialize a queue for every worker
1907 for (uint i = 0; i < _num_workers; ++i) {
1908 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1909 _task_queues->register_queue(i, task_queue);
1910 }
1911 // Divide roots among the workers. Assuming that the object reference distribution
1912 // correlates with root kind, use round-robin so that every worker has the same chance
1913 // to process every kind of root.
1914 size_t roots_num = _roots_stack.size();
1915 if (roots_num == 0) {
1916 // No work to do
1917 return false;
1918 }
1919
1920 for (uint j = 0; j < roots_num; j++) {
1921 uint stack_id = j % _num_workers;
1922 oop obj = _roots_stack.pop();
1923 _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1924 }
1925 return true;
1926 }
1927
1928 void object_iterate_parallel(ObjectClosure* cl,
1929 uint worker_id,
1930 ShenandoahObjToScanQueueSet* queue_set) {
1931 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1932 assert(queue_set != nullptr, "task queue must not be null");
1933
1934 ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1935 assert(q != nullptr, "object iterate queue must not be null");
1936
1937 ShenandoahMarkTask t;
1938 ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1939
1940 // Work through the queue to traverse heap.
1941 // Steal when there is no task in queue.
1942 while (q->pop(t) || queue_set->steal(worker_id, t)) {
1943 oop obj = t.obj();
1944 assert(oopDesc::is_oop(obj), "must be a valid oop");
1945 cl->do_object(obj);
1946 obj->oop_iterate(&oops);
1947 }
1948 assert(q->is_empty(), "should be empty");
1949 }
1950 };
1951
1952 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1953 return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1954 }
1955
1956 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1957 void ShenandoahHeap::keep_alive(oop obj) {
1958 if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1959 ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1960 }
1961 }
1962
1963 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1964 for (size_t i = 0; i < num_regions(); i++) {
1965 ShenandoahHeapRegion* current = get_region(i);
1966 blk->heap_region_do(current);
1967 }
1968 }
1969
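// Worker task that applies a thread-safe region closure to all regions in parallel.
// Workers claim strides of consecutive region indices with an atomic fetch-and-add.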
1970 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1971 private:
1972 ShenandoahHeap* const _heap;
1973 ShenandoahHeapRegionClosure* const _blk;
1974 size_t const _stride;
1975
1976 shenandoah_padding(0);
1977 volatile size_t _index;
1978 shenandoah_padding(1);
1979
1980 public:
1981 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1982 WorkerTask("Shenandoah Parallel Region Operation"),
1983 _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1984
1985 void work(uint worker_id) {
1986 ShenandoahParallelWorkerSession worker_session(worker_id);
1987 size_t stride = _stride;
1988
1989 size_t max = _heap->num_regions();
1990 while (Atomic::load(&_index) < max) {
1991 size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1992 size_t start = cur;
1993 size_t end = MIN2(cur + stride, max);
1994 if (start >= max) break;
1995
1996 for (size_t i = cur; i < end; i++) {
1997 ShenandoahHeapRegion* current = _heap->get_region(i);
1998 _blk->heap_region_do(current);
1999 }
2000 }
2001 }
2002 };
2003
2004 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2005 assert(blk->is_thread_safe(), "Only thread-safe closures here");
2006 const uint active_workers = workers()->active_workers();
2007 const size_t n_regions = num_regions();
2008 size_t stride = ShenandoahParallelRegionStride;
2009 if (stride == 0 && active_workers > 1) {
2010 // Automatically derive the stride to balance the work between threads
2011 // evenly. Do not try to split work if below the reasonable threshold.
2012 constexpr size_t threshold = 4096;
2013 stride = n_regions <= threshold ?
2014 threshold :
2015 (n_regions + active_workers - 1) / active_workers;
2016 }
2017
2018 if (n_regions > stride && active_workers > 1) {
2019 ShenandoahParallelHeapRegionTask task(blk, stride);
2020 workers()->run_task(&task);
2021 } else {
2022 heap_region_iterate(blk);
2023 }
2024 }
2025
2026 class ShenandoahRendezvousHandshakeClosure : public HandshakeClosure {
2027 public:
2028 inline ShenandoahRendezvousHandshakeClosure(const char* name) : HandshakeClosure(name) {}
2029 inline void do_thread(Thread* thread) {}
2030 };
2031
2032 void ShenandoahHeap::rendezvous_threads(const char* name) {
2033 ShenandoahRendezvousHandshakeClosure cl(name);
2034 Handshake::execute(&cl);
2035 }
2036
2037 void ShenandoahHeap::recycle_trash() {
2038 free_set()->recycle_trash();
2039 }
2040
2041 void ShenandoahHeap::do_class_unloading() {
2042 _unloader.unload();
2043 if (mode()->is_generational()) {
2044 old_generation()->set_parsable(false);
2045 }
2046 }
2047
2048 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2049 // Weak refs processing
2050 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2051 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2052 ShenandoahTimingsTracker t(phase);
2053 ShenandoahGCWorkerPhase worker_phase(phase);
2054 shenandoah_assert_generations_reconciled();
2055 gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2056 }
2057
2058 void ShenandoahHeap::prepare_update_heap_references() {
2059 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2060
2061 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2062 // make them parsable for update code to work correctly. Plus, we can compute new sizes
2063 // for future GCLABs here.
2064 if (UseTLAB) {
2065 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2066 gclabs_retire(ResizeTLAB);
2067 }
2068
2069 _update_refs_iterator.reset();
2070 }
2071
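// Flush the global gc state to the thread-local copies of all threads. Called at a safepoint;
// does nothing if the state has not changed since the last propagation.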
2072 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2073 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2074 if (_gc_state_changed) {
2075 ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
2076 Threads::threads_do(&propagator);
2077 _gc_state_changed = false;
2078 }
2079 }
2080
2081 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2082 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2083 _gc_state.set_cond(mask, value);
2084 _gc_state_changed = true;
2085 }
2086
2087 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
2088 // Holding the thread lock here ensures that any thread created after we change the gc
2089 // state will have the correct state. It also prevents attaching threads from seeing
2090 // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
2091 // threads will use their thread local copy of the gc state (changed by a handshake, or on a
2092 // safepoint).
2093 assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
2094 _gc_state.set_cond(mask, value);
2095 }
2096
2097 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2098 uint mask;
2099 assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2100 if (!in_progress && is_concurrent_old_mark_in_progress()) {
2101 assert(mode()->is_generational(), "Only generational GC has old marking");
2102 assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2103 // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2104 mask = YOUNG_MARKING;
2105 } else {
2106 mask = MARKING | YOUNG_MARKING;
2107 }
2108 set_gc_state_at_safepoint(mask, in_progress);
2109 manage_satb_barrier(in_progress);
2110 }
2111
2112 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2113 #ifdef ASSERT
2114 // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2115 bool has_forwarded = has_forwarded_objects();
2116 bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2117 bool evacuating = _gc_state.is_set(EVACUATION);
2118 assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2119 "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2120 #endif
2121 if (!in_progress && is_concurrent_young_mark_in_progress()) {
2122 // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2123 assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2124 set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2125 } else {
2126 set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2127 }
2128 manage_satb_barrier(in_progress);
2129 }
2130
2131 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2132 return old_generation()->is_preparing_for_mark();
2133 }
2134
2135 void ShenandoahHeap::manage_satb_barrier(bool active) {
2136 if (is_concurrent_mark_in_progress()) {
2137 // Ignore request to deactivate barrier while concurrent mark is in progress.
2138 // Do not attempt to re-activate the barrier if it is already active.
2139 if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2140 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2141 }
2142 } else {
2143 // No concurrent marking is in progress so honor request to deactivate,
2144 // but only if the barrier is already active.
2145 if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2146 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2147 }
2148 }
2149 }
2150
2151 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2152 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2153 set_gc_state_at_safepoint(EVACUATION, in_progress);
2154 }
2155
2156 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2157 if (in_progress) {
2158 _concurrent_strong_root_in_progress.set();
2159 } else {
2160 _concurrent_strong_root_in_progress.unset();
2161 }
2162 }
2163
2164 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2165 set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2166 }
2167
2168 GCTracer* ShenandoahHeap::tracer() {
2169 return shenandoah_policy()->tracer();
2170 }
2171
2172 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2173 return _free_set->used();
2174 }
2175
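// Atomically install the cancellation cause. Cancellation succeeds only if no other cause is
// already pending, i.e. the previous value was _no_gc or _shenandoah_concurrent_gc.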
2176 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
2177 const GCCause::Cause prev = _cancelled_gc.xchg(cause);
2178 return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
2179 }
2180
2181 void ShenandoahHeap::cancel_concurrent_mark() {
2182 if (mode()->is_generational()) {
2183 young_generation()->cancel_marking();
2184 old_generation()->cancel_marking();
2185 }
2186
2187 global_generation()->cancel_marking();
2188
2189 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2190 }
2191
2192 bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2193 if (try_cancel_gc(cause)) {
2194 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2195 log_info(gc,thread)("%s", msg.buffer());
2196 Events::log(Thread::current(), "%s", msg.buffer());
2197 _cancel_requested_time = os::elapsedTime();
2198 return true;
2199 }
2200 return false;
2201 }
2202
2203 uint ShenandoahHeap::max_workers() {
2204 return _max_workers;
2205 }
2206
2207 void ShenandoahHeap::stop() {
2208 // The shutdown sequence should be able to terminate when GC is running.
2209
2210 // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2211 _shenandoah_policy->record_shutdown();
2212
2213 // Step 1. Stop reporting on gc thread cpu utilization
2214 mmu_tracker()->stop();
2215
2216 // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC).
2217 control_thread()->stop();
2218
2219 // Step 3. Stop the uncommit thread.
2220 if (_uncommit_thread != nullptr) {
2221 _uncommit_thread->stop();
2222 }
2223 }
2224
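// Unload classes and purge metadata at a STW pause (degenerated or full GC), if class
// unloading was requested for this cycle.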
2225 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2226 if (!unload_classes()) return;
2227 ClassUnloadingContext ctx(_workers->active_workers(),
2228 true /* unregister_nmethods_during_purge */,
2229 false /* lock_nmethod_free_separately */);
2230
2231 // Unload classes and purge SystemDictionary.
2232 {
2233 ShenandoahPhaseTimings::Phase phase = full_gc ?
2234 ShenandoahPhaseTimings::full_gc_purge_class_unload :
2235 ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2236 ShenandoahIsAliveSelector is_alive;
2237 {
2238 CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2239 ShenandoahGCPhase gc_phase(phase);
2240 ShenandoahGCWorkerPhase worker_phase(phase);
2241 bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2242
2243 // Clean JVMCI metadata handles.
2244 JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
2245
2246 uint num_workers = _workers->active_workers();
2247 ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2248 _workers->run_task(&unlink_task);
2249 }
2250 // Release unloaded nmethods' memory.
2251 ClassUnloadingContext::context()->purge_and_free_nmethods();
2252 }
2253
2254 {
2255 ShenandoahGCPhase phase(full_gc ?
2256 ShenandoahPhaseTimings::full_gc_purge_cldg :
2257 ShenandoahPhaseTimings::degen_gc_purge_cldg);
2258 ClassLoaderDataGraph::purge(true /* at_safepoint */);
2259 }
2260 // Resize and verify metaspace
2261 MetaspaceGC::compute_new_size();
2262 DEBUG_ONLY(MetaspaceUtils::verify();)
2263 }
2264
2265 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2266 // so they should not have forwarded oops.
2267 // However, we do need to "null" dead oops in the roots, if that cannot be done
2268 // in concurrent cycles.
2269 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2270 uint num_workers = _workers->active_workers();
2271 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2272 ShenandoahPhaseTimings::full_gc_purge_weak_par :
2273 ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2274 ShenandoahGCPhase phase(timing_phase);
2275 ShenandoahGCWorkerPhase worker_phase(timing_phase);
2276 // Cleanup weak roots
2277 if (has_forwarded_objects()) {
2278 ShenandoahForwardedIsAliveClosure is_alive;
2279 ShenandoahNonConcUpdateRefsClosure keep_alive;
2280 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2281 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2282 _workers->run_task(&cleaning_task);
2283 } else {
2284 ShenandoahIsAliveClosure is_alive;
2285 #ifdef ASSERT
2286 ShenandoahAssertNotForwardedClosure verify_cl;
2287 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2288 cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2289 #else
2290 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2291 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2292 #endif
2293 _workers->run_task(&cleaning_task);
2294 }
2295 }
2296
2297 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2298 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2299 assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2300 ShenandoahGCPhase phase(full_gc ?
2301 ShenandoahPhaseTimings::full_gc_purge :
2302 ShenandoahPhaseTimings::degen_gc_purge);
2303 stw_weak_refs(full_gc);
2304 stw_process_weak_roots(full_gc);
2305 stw_unload_classes(full_gc);
2306 }
2307
2308 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2309 set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2310 }
2311
2312 void ShenandoahHeap::set_unload_classes(bool uc) {
2313 _unload_classes.set_cond(uc);
2314 }
2315
2316 bool ShenandoahHeap::unload_classes() const {
2317 return _unload_classes.is_set();
2318 }
2319
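// Address of the biased collection set map, used for fast in-collection-set tests.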
2320 address ShenandoahHeap::in_cset_fast_test_addr() {
2321 ShenandoahHeap* heap = ShenandoahHeap::heap();
2322 assert(heap->collection_set() != nullptr, "Sanity");
2323 return (address) heap->collection_set()->biased_map_address();
2324 }
2325
2326 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2327 if (mode()->is_generational()) {
2328 young_generation()->reset_bytes_allocated_since_gc_start();
2329 old_generation()->reset_bytes_allocated_since_gc_start();
2330 }
2331
2332 global_generation()->reset_bytes_allocated_since_gc_start();
2333 }
2334
2335 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2336 _degenerated_gc_in_progress.set_cond(in_progress);
2337 }
2338
2339 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2340 _full_gc_in_progress.set_cond(in_progress);
2341 }
2342
2343 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2344 assert (is_full_gc_in_progress(), "should be");
2345 _full_gc_move_in_progress.set_cond(in_progress);
2346 }
2347
2348 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2349 set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2350 }
2351
2352 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2353 ShenandoahCodeRoots::register_nmethod(nm);
2354 }
2355
2356 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2357 ShenandoahCodeRoots::unregister_nmethod(nm);
2358 }
2359
2360 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2361 heap_region_containing(o)->record_pin();
2362 }
2363
2364 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2365 ShenandoahHeapRegion* r = heap_region_containing(o);
2366 assert(r != nullptr, "Sanity");
2367 assert(r->pin_count() > 0, "Region %zu should have non-zero pins", r->index());
2368 r->record_unpin();
2369 }
2370
2371 void ShenandoahHeap::sync_pinned_region_status() {
2372 ShenandoahHeapLocker locker(lock());
2373
2374 for (size_t i = 0; i < num_regions(); i++) {
2375 ShenandoahHeapRegion *r = get_region(i);
2376 if (r->is_active()) {
2377 if (r->is_pinned()) {
2378 if (r->pin_count() == 0) {
2379 r->make_unpinned();
2380 }
2381 } else {
2382 if (r->pin_count() > 0) {
2383 r->make_pinned();
2384 }
2385 }
2386 }
2387 }
2388
2389 assert_pinned_region_status();
2390 }
2391
2392 #ifdef ASSERT
2393 void ShenandoahHeap::assert_pinned_region_status() {
2394 for (size_t i = 0; i < num_regions(); i++) {
2395 ShenandoahHeapRegion* r = get_region(i);
2396 shenandoah_assert_generations_reconciled();
2397 if (gc_generation()->contains(r)) {
2398 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2399 "Region %zu pinning status is inconsistent", i);
2400 }
2401 }
2402 }
2403 #endif
2404
2405 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2406 return _gc_timer;
2407 }
2408
2409 void ShenandoahHeap::prepare_concurrent_roots() {
2410 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2411 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2412 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2413 set_concurrent_weak_root_in_progress(true);
2414 if (unload_classes()) {
2415 _unloader.prepare();
2416 }
2417 }
2418
2419 void ShenandoahHeap::finish_concurrent_roots() {
2420 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2421 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2422 if (unload_classes()) {
2423 _unloader.finish();
2424 }
2425 }
2426
2427 #ifdef ASSERT
2428 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2429 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2430
2431 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2432 // Use ParallelGCThreads inside safepoints
2433 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2434 ParallelGCThreads, nworkers);
2435 } else {
2436 // Use ConcGCThreads outside safepoints
2437 assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2438 ConcGCThreads, nworkers);
2439 }
2440 }
2441 #endif
2442
2443 ShenandoahVerifier* ShenandoahHeap::verifier() {
2444 guarantee(ShenandoahVerify, "Should be enabled");
2445 assert (_verifier != nullptr, "sanity");
2446 return _verifier;
2447 }
2448
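// Worker task for the update-references phase: walks heap regions and updates oops in live
// objects up to each region's update watermark. CONCURRENT selects the closure flavor and
// whether workers join the suspendible thread set.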
2449 template<bool CONCURRENT>
2450 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2451 private:
2452 ShenandoahHeap* _heap;
2453 ShenandoahRegionIterator* _regions;
2454 public:
2455 explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2456 WorkerTask("Shenandoah Update References"),
2457 _heap(ShenandoahHeap::heap()),
2458 _regions(regions) {
2459 }
2460
2461 void work(uint worker_id) {
2462 if (CONCURRENT) {
2463 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2464 ShenandoahSuspendibleThreadSetJoiner stsj;
2465 do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2466 } else {
2467 ShenandoahParallelWorkerSession worker_session(worker_id);
2468 do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2469 }
2470 }
2471
2472 private:
2473 template<class T>
2474 void do_work(uint worker_id) {
2475 if (CONCURRENT && (worker_id == 0)) {
2476 // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2477 // results of evacuation. These reserves are no longer necessary because evacuation has completed.
2478 size_t cset_regions = _heap->collection_set()->count();
2479
2480 // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2481 // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2482 // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2483 // next GC cycle.
2484 _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2485 }
2486 // If !CONCURRENT, there's no value in expanding Mutator free set
2487 T cl;
2488 ShenandoahHeapRegion* r = _regions->next();
2489 while (r != nullptr) {
2490 HeapWord* update_watermark = r->get_update_watermark();
2491 assert (update_watermark >= r->bottom(), "sanity");
2492 if (r->is_active() && !r->is_cset()) {
2493 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2494 if (ShenandoahPacing) {
2495 _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
2496 }
2497 }
2498 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2499 return;
2500 }
2501 r = _regions->next();
2502 }
2503 }
2504 };
2505
2506 void ShenandoahHeap::update_heap_references(bool concurrent) {
2507 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2508
2509 if (concurrent) {
2510 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2511 workers()->run_task(&task);
2512 } else {
2513 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2514 workers()->run_task(&task);
2515 }
2516 }
2517
2518 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2519 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2520 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2521
2522 {
2523 ShenandoahGCPhase phase(concurrent ?
2524 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2525 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2526
2527 final_update_refs_update_region_states();
2528
2529 assert_pinned_region_status();
2530 }
2531
2532 {
2533 ShenandoahGCPhase phase(concurrent ?
2534 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2535 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2536 trash_cset_regions();
2537 }
2538 }
2539
2540 void ShenandoahHeap::final_update_refs_update_region_states() {
2541 ShenandoahSynchronizePinnedRegionStates cl;
2542 parallel_heap_region_iterate(&cl);
2543 }
2544
2545 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2546 ShenandoahGCPhase phase(concurrent ?
2547 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2548 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2549 ShenandoahHeapLocker locker(lock());
2550 size_t young_cset_regions, old_cset_regions;
2551 size_t first_old_region, last_old_region, old_region_count;
2552 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2553 // If there are no old regions, first_old_region will be greater than last_old_region
2554 assert((first_old_region > last_old_region) ||
2555 ((last_old_region + 1 - first_old_region >= old_region_count) &&
2556 get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2557 "sanity: old_region_count: %zu, first_old_region: %zu, last_old_region: %zu",
2558 old_region_count, first_old_region, last_old_region);
2559
2560 if (mode()->is_generational()) {
2561 #ifdef ASSERT
2562 if (ShenandoahVerify) {
2563 verifier()->verify_before_rebuilding_free_set();
2564 }
2565 #endif
2566
2567 // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2568 // available for transfer to old. Note that transfer of humongous regions does not impact available.
2569 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2570 size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2571 gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2572
2573 // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
2574 // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
2575 // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
2576 // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2577 //
2578 // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2579 // within partially consumed regions of memory.
2580 }
2581 // Rebuild free set based on adjusted generation sizes.
2582 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2583
2584 if (mode()->is_generational()) {
2585 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2586 ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2587 old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2588 }
2589 }
2590
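// The marking bitmap is committed in slices, each covering _bitmap_regions_per_slice regions.
// A slice is considered committed as long as any region in its group is committed.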
2591 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2592 size_t slice = r->index() / _bitmap_regions_per_slice;
2593
2594 size_t regions_from = _bitmap_regions_per_slice * slice;
2595 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2596 for (size_t g = regions_from; g < regions_to; g++) {
2597 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2598 if (skip_self && g == r->index()) continue;
2599 if (get_region(g)->is_committed()) {
2600 return true;
2601 }
2602 }
2603 return false;
2604 }
2605
2606 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2607 shenandoah_assert_heaplocked();
2608
2609 // Bitmaps in special regions do not need commits
2610 if (_bitmap_region_special) {
2611 return true;
2612 }
2613
2614 if (is_bitmap_slice_committed(r, true)) {
2615 // Some other region from the group is already committed, meaning the bitmap
2616 // slice is already committed, we exit right away.
2617 return true;
2618 }
2619
2620 // Commit the bitmap slice:
2621 size_t slice = r->index() / _bitmap_regions_per_slice;
2622 size_t off = _bitmap_bytes_per_slice * slice;
2623 size_t len = _bitmap_bytes_per_slice;
2624 char* start = (char*) _bitmap_region.start() + off;
2625
2626 if (!os::commit_memory(start, len, false)) {
2627 return false;
2628 }
2629
2630 if (AlwaysPreTouch) {
2631 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2632 }
2633
2634 return true;
2635 }
2636
2637 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2638 shenandoah_assert_heaplocked();
2639
2640 // Bitmaps in special regions do not need uncommits
2641 if (_bitmap_region_special) {
2642 return true;
2643 }
2644
2645 if (is_bitmap_slice_committed(r, true)) {
2646 // Some other region from the group is still committed, meaning the bitmap
2647 // slice should stay committed, exit right away.
2648 return true;
2649 }
2650
2651 // Uncommit the bitmap slice:
2652 size_t slice = r->index() / _bitmap_regions_per_slice;
2653 size_t off = _bitmap_bytes_per_slice * slice;
2654 size_t len = _bitmap_bytes_per_slice;
2655 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2656 return false;
2657 }
2658 return true;
2659 }
2660
2661 void ShenandoahHeap::forbid_uncommit() {
2662 if (_uncommit_thread != nullptr) {
2663 _uncommit_thread->forbid_uncommit();
2664 }
2665 }
2666
2667 void ShenandoahHeap::allow_uncommit() {
2668 if (_uncommit_thread != nullptr) {
2669 _uncommit_thread->allow_uncommit();
2670 }
2671 }
2672
2673 #ifdef ASSERT
2674 bool ShenandoahHeap::is_uncommit_in_progress() {
2675 if (_uncommit_thread != nullptr) {
2676 return _uncommit_thread->is_uncommit_in_progress();
2677 }
2678 return false;
2679 }
2680 #endif
2681
2682 void ShenandoahHeap::safepoint_synchronize_begin() {
2683 StackWatermarkSet::safepoint_synchronize_begin();
2684 SuspendibleThreadSet::synchronize();
2685 }
2686
2687 void ShenandoahHeap::safepoint_synchronize_end() {
2688 SuspendibleThreadSet::desynchronize();
2689 }
2690
2691 void ShenandoahHeap::try_inject_alloc_failure() {
2692 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2693 _inject_alloc_failure.set();
2694 os::naked_short_sleep(1);
2695 if (cancelled_gc()) {
2696 log_info(gc)("Allocation failure was successfully injected");
2697 }
2698 }
2699 }
2700
2701 bool ShenandoahHeap::should_inject_alloc_failure() {
2702 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2703 }
2704
2705 void ShenandoahHeap::initialize_serviceability() {
2706 _memory_pool = new ShenandoahMemoryPool(this);
2707 _cycle_memory_manager.add_pool(_memory_pool);
2708 _stw_memory_manager.add_pool(_memory_pool);
2709 }
2710
2711 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2712 GrowableArray<GCMemoryManager*> memory_managers(2);
2713 memory_managers.append(&_cycle_memory_manager);
2714 memory_managers.append(&_stw_memory_manager);
2715 return memory_managers;
2716 }
2717
2718 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2719 GrowableArray<MemoryPool*> memory_pools(1);
2720 memory_pools.append(_memory_pool);
2721 return memory_pools;
2722 }
2723
2724 MemoryUsage ShenandoahHeap::memory_usage() {
2725 return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2726 }
2727
2728 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2729 _heap(ShenandoahHeap::heap()),
2730 _index(0) {}
2731
2732 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2733 _heap(heap),
2734 _index(0) {}
2735
2736 void ShenandoahRegionIterator::reset() {
2737 _index = 0;
2738 }
2739
2740 bool ShenandoahRegionIterator::has_next() const {
2741 return _index < _heap->num_regions();
2742 }
2743
2744 char ShenandoahHeap::gc_state() const {
2745 return _gc_state.raw_value();
2746 }
2747
2748 bool ShenandoahHeap::is_gc_state(GCState state) const {
2749 // If the global gc state has been changed, but hasn't yet been propagated to all threads, then
2750 // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
2751 // _gc_state_changed will be toggled to false and we need to use the thread local state.
2752 return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
2753 }
2754
2755
2756 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2757 #ifdef ASSERT
2758 assert(_liveness_cache != nullptr, "sanity");
2759 assert(worker_id < _max_workers, "sanity");
2760 for (uint i = 0; i < num_regions(); i++) {
2761 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2762 }
2763 #endif
2764 return _liveness_cache[worker_id];
2765 }
2766
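// Fold the worker's cached per-region liveness counts into the regions' live data and
// reset the cache for the next use.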
2767 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2768 assert(worker_id < _max_workers, "sanity");
2769 assert(_liveness_cache != nullptr, "sanity");
2770 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2771 for (uint i = 0; i < num_regions(); i++) {
2772 ShenandoahLiveData live = ld[i];
2773 if (live > 0) {
2774 ShenandoahHeapRegion* r = get_region(i);
2775 r->increase_live_data_gc_words(live);
2776 ld[i] = 0;
2777 }
2778 }
2779 }
2780
2781 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2782 if (is_idle()) return false;
2783
2784 // Objects allocated after marking start are implicitly alive, don't need any barriers during
2785 // marking phase.
2786 if (is_concurrent_mark_in_progress() &&
2787 !marking_context()->allocated_after_mark_start(obj)) {
2788 return true;
2789 }
2790
2791 // Can not guarantee obj is deeply good.
2792 if (has_forwarded_objects()) {
2793 return true;
2794 }
2795
2796 return false;
2797 }
2798
2799 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2800 #if INCLUDE_CDS_JAVA_HEAP
2801 // CDS wants a contiguous memory range to load a bunch of objects.
2802 // This effectively bypasses normal allocation paths, and requires
2803 // a bit of massaging to unbreak GC invariants.
2804
2805 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
2806
2807 // Easy case: a single regular region, no further adjustments needed.
2808 if (!ShenandoahHeapRegion::requires_humongous(size)) {
2809 return allocate_memory(req);
2810 }
2811
2812 // Hard case: the requested size would cause a humongous allocation.
2813 // We need to make sure it looks like regular allocation to the rest of GC.
2814
2815 // CDS code would guarantee no objects straddle multiple regions, as long as
2816 // regions are as large as MIN_GC_REGION_ALIGNMENT. It is impractical at this
2817 // point to deal with case when Shenandoah runs with smaller regions.
2818 // TODO: This check can be dropped once MIN_GC_REGION_ALIGNMENT agrees more with Shenandoah.
2819 if (ShenandoahHeapRegion::region_size_bytes() < ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT) {
2820 return nullptr;
2821 }
2822
2823 HeapWord* mem = allocate_memory(req);
2824 size_t start_idx = heap_region_index_containing(mem);
2825 size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
2826
2827 // Flip humongous -> regular.
2828 {
2829 ShenandoahHeapLocker locker(lock(), false);
2830 for (size_t c = start_idx; c < start_idx + num_regions; c++) {
2831 get_region(c)->make_regular_bypass();
2832 }
2833 }
2834
2835 return mem;
2836 #else
2837 assert(false, "Archive heap loader should not be available, should not be here");
2838 return nullptr;
2839 #endif // INCLUDE_CDS_JAVA_HEAP
2840 }
2841
2842 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2843 // Nothing to do here, except checking that heap looks fine.
2844 #ifdef ASSERT
2845 HeapWord* start = archive_space.start();
2846 HeapWord* end = archive_space.end();
2847
2848 // No unclaimed space between the objects.
2849 // Objects are properly allocated in correct regions.
2850 HeapWord* cur = start;
2851 while (cur < end) {
2852 oop oop = cast_to_oop(cur);
2853 shenandoah_assert_in_correct_region(nullptr, oop);
2854 cur += oop->size();
2855 }
2856
2857 // No unclaimed tail at the end of archive space.
2858 assert(cur == end,
2859 "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2860 p2i(cur), p2i(end));
2861
2862 // Region bounds are good.
2863 ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2864 ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2865 assert(begin_reg->is_regular(), "Must be");
2866 assert(end_reg->is_regular(), "Must be");
2867 assert(begin_reg->bottom() == start,
2868 "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2869 p2i(start), p2i(begin_reg->bottom()));
2870 assert(end_reg->top() == end,
2871 "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2872 p2i(end), p2i(end_reg->top()));
2873 #endif
2874 }
2875
2876 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2877 if (!mode()->is_generational()) {
2878 return global_generation();
2879 } else if (affiliation == YOUNG_GENERATION) {
2880 return young_generation();
2881 } else if (affiliation == OLD_GENERATION) {
2882 return old_generation();
2883 }
2884
2885 ShouldNotReachHere();
2886 return nullptr;
2887 }
2888
2889 void ShenandoahHeap::log_heap_status(const char* msg) const {
2890 if (mode()->is_generational()) {
2891 young_generation()->log_status(msg);
2892 old_generation()->log_status(msg);
2893 } else {
2894 global_generation()->log_status(msg);
2895 }
2896 }