1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27
28 #include "cds/aotMappedHeapWriter.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/classUnloadingContext.hpp"
31 #include "gc/shared/fullGCForwarding.hpp"
32 #include "gc/shared/gc_globals.hpp"
33 #include "gc/shared/gcArguments.hpp"
34 #include "gc/shared/gcTimer.hpp"
35 #include "gc/shared/gcTraceTime.inline.hpp"
36 #include "gc/shared/locationPrinter.inline.hpp"
37 #include "gc/shared/memAllocator.hpp"
38 #include "gc/shared/plab.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
42 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
43 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
44 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
45 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
46 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
47 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
48 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
49 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
50 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
51 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
52 #include "gc/shenandoah/shenandoahControlThread.hpp"
53 #include "gc/shenandoah/shenandoahFreeSet.hpp"
54 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
55 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
56 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
57 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
58 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
59 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
60 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
61 #include "gc/shenandoah/shenandoahInitLogger.hpp"
62 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
63 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
64 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
65 #include "gc/shenandoah/shenandoahObjArrayAllocator.hpp"
66 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
67 #include "gc/shenandoah/shenandoahPadding.hpp"
68 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
69 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
70 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
71 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
72 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
73 #include "gc/shenandoah/shenandoahSTWMark.hpp"
74 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
75 #include "gc/shenandoah/shenandoahUtils.hpp"
76 #include "gc/shenandoah/shenandoahVerifier.hpp"
77 #include "gc/shenandoah/shenandoahVMOperations.hpp"
78 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
79 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
80 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
81 #include "memory/allocation.hpp"
82 #include "memory/classLoaderMetaspace.hpp"
83 #include "memory/memoryReserver.hpp"
84 #include "memory/metaspaceUtils.hpp"
85 #include "memory/universe.hpp"
86 #include "nmt/mallocTracker.hpp"
87 #include "nmt/memTracker.hpp"
88 #include "oops/compressedOops.inline.hpp"
89 #include "prims/jvmtiTagMap.hpp"
90 #include "runtime/atomic.hpp"
91 #include "runtime/atomicAccess.hpp"
92 #include "runtime/globals.hpp"
93 #include "runtime/interfaceSupport.inline.hpp"
94 #include "runtime/java.hpp"
95 #include "runtime/orderAccess.hpp"
96 #include "runtime/safepointMechanism.hpp"
97 #include "runtime/stackWatermarkSet.hpp"
98 #include "runtime/threads.hpp"
99 #include "runtime/vmThread.hpp"
100 #include "utilities/events.hpp"
101 #include "utilities/globalDefinitions.hpp"
102 #include "utilities/powerOfTwo.hpp"
103 #if INCLUDE_JVMCI
104 #include "jvmci/jvmci.hpp"
105 #endif
106 #if INCLUDE_JFR
107 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
108 #endif
109
110 class ShenandoahPretouchHeapTask : public WorkerTask {
111 private:
112 ShenandoahRegionIterator _regions;
113 const size_t _page_size;
114 public:
115 ShenandoahPretouchHeapTask(size_t page_size) :
116 WorkerTask("Shenandoah Pretouch Heap"),
117 _page_size(page_size) {}
118
119 virtual void work(uint worker_id) {
120 ShenandoahHeapRegion* r = _regions.next();
121 while (r != nullptr) {
122 if (r->is_committed()) {
123 os::pretouch_memory(r->bottom(), r->end(), _page_size);
124 }
125 r = _regions.next();
126 }
127 }
128 };
129
130 class ShenandoahPretouchBitmapTask : public WorkerTask {
131 private:
132 ShenandoahRegionIterator _regions;
133 char* _bitmap_base;
134 const size_t _bitmap_size;
135 const size_t _page_size;
136 public:
137 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
138 WorkerTask("Shenandoah Pretouch Bitmap"),
139 _bitmap_base(bitmap_base),
140 _bitmap_size(bitmap_size),
141 _page_size(page_size) {}
142
143 virtual void work(uint worker_id) {
144 ShenandoahHeapRegion* r = _regions.next();
145 while (r != nullptr) {
146 size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
147 size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: %zu <= %zu", end, _bitmap_size);
149
150 if (r->is_committed()) {
151 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
152 }
153
154 r = _regions.next();
155 }
156 }
157 };
158
159 static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
  // When a page size is given, we don't want to mix large
  // and normal pages. If the size is not a multiple of the
  // page size, it will be aligned up to achieve this.
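  // For example, assuming 2M large pages, a 5M request would be aligned up
  // to 6M so that the entire reservation can be backed by large pages.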
163 size_t alignment = os::vm_allocation_granularity();
164 if (preferred_page_size != os::vm_page_size()) {
165 alignment = MAX2(preferred_page_size, alignment);
166 size = align_up(size, alignment);
167 }
168
169 const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size, mtGC);
170 if (!reserved.is_reserved()) {
171 vm_exit_during_initialization("Could not reserve space");
172 }
173 return reserved;
174 }
175
176 jint ShenandoahHeap::initialize() {
177 //
178 // Figure out heap sizing
179 //
180
181 size_t init_byte_size = InitialHeapSize;
182 size_t min_byte_size = MinHeapSize;
183 size_t max_byte_size = MaxHeapSize;
184 size_t heap_alignment = HeapAlignment;
185
186 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
187
188 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
189 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
190
191 _num_regions = ShenandoahHeapRegion::region_count();
192 assert(_num_regions == (max_byte_size / reg_size_bytes),
193 "Regions should cover entire heap exactly: %zu != %zu/%zu",
194 _num_regions, max_byte_size, reg_size_bytes);
195
196 size_t num_committed_regions = init_byte_size / reg_size_bytes;
197 num_committed_regions = MIN2(num_committed_regions, _num_regions);
198 assert(num_committed_regions <= _num_regions, "sanity");
199 _initial_size = num_committed_regions * reg_size_bytes;
200
201 size_t num_min_regions = min_byte_size / reg_size_bytes;
202 num_min_regions = MIN2(num_min_regions, _num_regions);
203 assert(num_min_regions <= _num_regions, "sanity");
204 _minimum_size = num_min_regions * reg_size_bytes;
205
206 _soft_max_size.store_relaxed(clamp(SoftMaxHeapSize, min_capacity(), max_capacity()));
207
208 _committed.store_relaxed(_initial_size);
209
210 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
211 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
212 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
213
214 //
215 // Reserve and commit memory for heap
216 //
217
218 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
219 initialize_reserved_region(heap_rs);
220 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
221 _heap_region_special = heap_rs.special();
222
223 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
224 "Misaligned heap: " PTR_FORMAT, p2i(base()));
225 os::trace_page_sizes_for_requested_size("Heap",
226 max_byte_size, heap_alignment,
227 heap_rs.base(),
228 heap_rs.size(), heap_rs.page_size());
229
230 #if SHENANDOAH_OPTIMIZED_MARKTASK
231 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
232 // Fail if we ever attempt to address more than we can.
233 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
234 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
235 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
236 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
237 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
238 vm_exit_during_initialization("Fatal Error", buf);
239 }
240 #endif
241
242 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
243 if (!_heap_region_special) {
244 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
245 "Cannot commit heap memory");
246 }
247
248 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
249
250 // Now we know the number of regions and heap sizes, initialize the heuristics.
251 initialize_heuristics();
252
  // If ShenandoahCardBarrier is enabled but the mode is not generational,
  // we are running in passive mode and must initialize the old generation
  // so that a card table exists.
256 if (ShenandoahCardBarrier && !(mode()->is_generational())) {
257 _old_generation = new ShenandoahOldGeneration(max_workers());
258 }
259
260 assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
261
262 //
263 // Worker threads must be initialized after the barrier is configured
264 //
265 _workers = new ShenandoahWorkerThreads("ShenWorker", _max_workers);
266 if (_workers == nullptr) {
267 vm_exit_during_initialization("Failed necessary allocation.");
268 } else {
269 _workers->initialize_workers();
270 }
271
272 if (ParallelGCThreads > 1) {
273 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
274 _safepoint_workers->initialize_workers();
275 }
276
277 //
278 // Reserve and commit memory for bitmap(s)
279 //
280
281 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
282 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
283
284 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
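  // For example, with 2M regions and the usual heap_map_factor of 64
  // (one bitmap bit per 8-byte-aligned heap word, i.e. 64 heap bytes per
  // bitmap byte), each region needs 32K of mark bitmap.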
285
286 guarantee(bitmap_bytes_per_region != 0,
287 "Bitmap bytes per region should not be zero");
288 guarantee(is_power_of_2(bitmap_bytes_per_region),
289 "Bitmap bytes per region should be power of two: %zu", bitmap_bytes_per_region);
290
291 if (bitmap_page_size > bitmap_bytes_per_region) {
292 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
293 _bitmap_bytes_per_slice = bitmap_page_size;
294 } else {
295 _bitmap_regions_per_slice = 1;
296 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
297 }
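  // Continuing the example above, assuming 2M bitmap pages and 32K of bitmap
  // per region: one bitmap page spans 64 regions, so bitmap memory is
  // committed and uncommitted in 64-region slices.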
298
299 guarantee(_bitmap_regions_per_slice >= 1,
300 "Should have at least one region per slice: %zu",
301 _bitmap_regions_per_slice);
302
303 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
304 "Bitmap slices should be page-granular: bps = %zu, page size = %zu",
305 _bitmap_bytes_per_slice, bitmap_page_size);
306
307 ReservedSpace bitmap = reserve(_bitmap_size, bitmap_page_size);
308 os::trace_page_sizes_for_requested_size("Mark Bitmap",
309 bitmap_size_orig, bitmap_page_size,
310 bitmap.base(),
311 bitmap.size(), bitmap.page_size());
312 MemTracker::record_virtual_memory_tag(bitmap, mtGC);
313 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
314 _bitmap_region_special = bitmap.special();
315
316 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
317 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
318 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
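  // For example, committing 100 regions with a 64-region slice rounds up to
  // 128 regions, i.e. two slices' worth of initial bitmap commit.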
319 if (!_bitmap_region_special) {
320 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
321 "Cannot commit bitmap memory");
322 }
323
324 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
325
326 if (ShenandoahVerify) {
327 ReservedSpace verify_bitmap = reserve(_bitmap_size, bitmap_page_size);
328 os::trace_page_sizes_for_requested_size("Verify Bitmap",
329 bitmap_size_orig, bitmap_page_size,
330 verify_bitmap.base(),
331 verify_bitmap.size(), verify_bitmap.page_size());
332 if (!verify_bitmap.special()) {
333 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
334 "Cannot commit verification bitmap memory");
335 }
336 MemTracker::record_virtual_memory_tag(verify_bitmap, mtGC);
337 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
338 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
339 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
340 }
341
342 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
343 size_t aux_bitmap_page_size = bitmap_page_size;
344
345 ReservedSpace aux_bitmap = reserve(_bitmap_size, aux_bitmap_page_size);
346 os::trace_page_sizes_for_requested_size("Aux Bitmap",
347 bitmap_size_orig, aux_bitmap_page_size,
348 aux_bitmap.base(),
349 aux_bitmap.size(), aux_bitmap.page_size());
350 MemTracker::record_virtual_memory_tag(aux_bitmap, mtGC);
351 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
352 _aux_bitmap_region_special = aux_bitmap.special();
353 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
354
355 //
356 // Create regions and region sets
357 //
358 size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
359 size_t region_storage_size_orig = region_align * _num_regions;
360 size_t region_storage_size = align_up(region_storage_size_orig,
361 MAX2(region_page_size, os::vm_allocation_granularity()));
362
363 ReservedSpace region_storage = reserve(region_storage_size, region_page_size);
364 os::trace_page_sizes_for_requested_size("Region Storage",
365 region_storage_size_orig, region_page_size,
366 region_storage.base(),
367 region_storage.size(), region_storage.page_size());
368 MemTracker::record_virtual_memory_tag(region_storage, mtGC);
369 if (!region_storage.special()) {
370 os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
371 "Cannot commit region memory");
372 }
373
  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
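  // Keeping the cset map base at a small power-of-two address means the biased map address
  // used by the in-cset fast path can likely be folded into compact addressing modes in
  // JIT-generated code; this is a best-effort optimization only.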
377 {
378 const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
379 const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
380 const size_t cset_page_size = os::vm_page_size();
381
382 uintptr_t min = round_up_power_of_2(cset_align);
383 uintptr_t max = (1u << 30u);
384 ReservedSpace cset_rs;
385
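    // Probe power-of-two base addresses, doubling from the minimum alignment up to 1G.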
386 for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
387 char* req_addr = (char*)addr;
388 assert(is_aligned(req_addr, cset_align), "Should be aligned");
389 cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size, mtGC);
390 if (cset_rs.is_reserved()) {
391 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
392 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
393 break;
394 }
395 }
396
397 if (_collection_set == nullptr) {
398 cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size(), mtGC);
399 if (!cset_rs.is_reserved()) {
400 vm_exit_during_initialization("Cannot reserve memory for collection set");
401 }
402
403 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
404 }
405 os::trace_page_sizes_for_requested_size("Collection Set",
406 cset_size, cset_page_size,
407 cset_rs.base(),
408 cset_rs.size(), cset_rs.page_size());
409 }
410
411 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
412 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
413
414 {
415 ShenandoahHeapLocker locker(lock());
416 for (size_t i = 0; i < _num_regions; i++) {
417 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
418 bool is_committed = i < num_committed_regions;
419 void* loc = region_storage.base() + i * region_align;
420
421 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
422 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
423
424 _marking_context->initialize_top_at_mark_start(r);
425 _regions[i] = r;
426 assert(!collection_set()->is_in(i), "New region should not be in collection set");
427
428 _affiliations[i] = ShenandoahAffiliation::FREE;
429 }
430
431 if (mode()->is_generational()) {
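      // For example, with the default ShenandoahEvacReserve of 5, this reserves 5% of
      // the soft max capacity for young evacuations; old evacuation and promotion
      // reserves start out at zero.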
432 size_t young_reserve = (soft_max_capacity() * ShenandoahEvacReserve) / 100;
433 young_generation()->set_evacuation_reserve(young_reserve);
434 old_generation()->set_evacuation_reserve((size_t) 0);
435 old_generation()->set_promoted_reserve((size_t) 0);
436 }
437
438 _free_set = new ShenandoahFreeSet(this, _num_regions);
439 initialize_generations();
440
    // We are initializing the free set; we ignore cset region tallies.
442 size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
443 _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
444 if (mode()->is_generational()) {
445 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
446 // We cannot call
447 // gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions)
448 // until after the heap is fully initialized. So we make up a safe value here.
449 size_t allocation_runway = InitialHeapSize / 2;
450 gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
451 }
452 _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
453 }
454
455 if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
459 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
460
461 _pretouch_heap_page_size = heap_page_size;
462 _pretouch_bitmap_page_size = bitmap_page_size;
463
    // OS memory managers may want to coalesce back-to-back pages. Make their job
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
466
467 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
468 _workers->run_task(&bcl);
469
470 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
471 _workers->run_task(&hcl);
472 }
473
474 //
475 // Initialize the rest of GC subsystems
476 //
477
478 _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
479 for (uint worker = 0; worker < _max_workers; worker++) {
480 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
481 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
482 }
483
484 // There should probably be Shenandoah-specific options for these,
485 // just as there are G1-specific options.
486 {
487 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
488 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
489 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
490 }
491
492 _monitoring_support = new ShenandoahMonitoringSupport(this);
493 _phase_timings = new ShenandoahPhaseTimings(max_workers());
494 ShenandoahCodeRoots::initialize();
495
496 // Initialization of controller makes use of variables established by initialize_heuristics.
497 initialize_controller();
498
499 // Certain initialization of heuristics must be deferred until after controller is initialized.
500 post_initialize_heuristics();
501 start_idle_span();
502 if (ShenandoahUncommit) {
503 _uncommit_thread = new ShenandoahUncommitThread(this);
504 }
505 print_init_logger();
506 FullGCForwarding::initialize(_heap_region);
507 return JNI_OK;
508 }
509
510 void ShenandoahHeap::initialize_controller() {
511 _control_thread = new ShenandoahControlThread();
512 }
513
514 void ShenandoahHeap::print_init_logger() const {
515 ShenandoahInitLogger::print();
516 }
517
518 void ShenandoahHeap::initialize_mode() {
519 if (ShenandoahGCMode != nullptr) {
520 if (strcmp(ShenandoahGCMode, "satb") == 0) {
521 _gc_mode = new ShenandoahSATBMode();
522 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
523 _gc_mode = new ShenandoahPassiveMode();
524 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
525 _gc_mode = new ShenandoahGenerationalMode();
526 } else {
527 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
528 }
529 } else {
530 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
531 }
532 _gc_mode->initialize_flags();
533 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
534 vm_exit_during_initialization(
535 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
536 _gc_mode->name()));
537 }
538 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
539 vm_exit_during_initialization(
540 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
541 _gc_mode->name()));
542 }
543 }
544
545 void ShenandoahHeap::initialize_heuristics() {
546 _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers());
547 _global_generation->initialize_heuristics(mode());
548 }
549
550 #ifdef _MSC_VER
551 #pragma warning( push )
552 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
553 #endif
554
555 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
556 CollectedHeap(),
557 _active_generation(nullptr),
558 _initial_size(0),
559 _committed(0),
560 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
561 _workers(nullptr),
562 _safepoint_workers(nullptr),
563 _heap_region_special(false),
564 _num_regions(0),
565 _regions(nullptr),
566 _affiliations(nullptr),
567 _gc_state_changed(false),
568 _gc_no_progress_count(0),
569 _cancel_requested_time(0),
570 _update_refs_iterator(this),
571 _global_generation(nullptr),
572 _control_thread(nullptr),
573 _uncommit_thread(nullptr),
574 _young_generation(nullptr),
575 _old_generation(nullptr),
576 _shenandoah_policy(policy),
577 _gc_mode(nullptr),
578 _free_set(nullptr),
579 _verifier(nullptr),
580 _phase_timings(nullptr),
581 _monitoring_support(nullptr),
582 _memory_pool(nullptr),
583 _stw_memory_manager("Shenandoah Pauses"),
584 _cycle_memory_manager("Shenandoah Cycles"),
585 _gc_timer(new ConcurrentGCTimer()),
586 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
587 _marking_context(nullptr),
588 _bitmap_size(0),
589 _bitmap_regions_per_slice(0),
590 _bitmap_bytes_per_slice(0),
591 _bitmap_region_special(false),
592 _aux_bitmap_region_special(false),
593 _liveness_cache(nullptr),
594 _collection_set(nullptr),
595 _evac_tracker(new ShenandoahEvacuationTracker())
596 {
597 // Initialize GC mode early, many subsequent initialization procedures depend on it
598 initialize_mode();
599 _cancelled_gc.set(GCCause::_no_gc);
600 }
601
602 #ifdef _MSC_VER
603 #pragma warning( pop )
604 #endif
605
606 void ShenandoahHeap::print_heap_on(outputStream* st) const {
607 const bool is_generational = mode()->is_generational();
608 const char* front_spacing = "";
609 if (is_generational) {
610 st->print_cr("Generational Shenandoah Heap");
611 st->print_cr(" Young:");
612 st->print_cr(" " PROPERFMT " max, " PROPERFMT " used", PROPERFMTARGS(young_generation()->max_capacity()), PROPERFMTARGS(young_generation()->used()));
613 st->print_cr(" Old:");
614 st->print_cr(" " PROPERFMT " max, " PROPERFMT " used", PROPERFMTARGS(old_generation()->max_capacity()), PROPERFMTARGS(old_generation()->used()));
615 st->print_cr(" Entire heap:");
616 st->print_cr(" " PROPERFMT " soft max, " PROPERFMT " committed",
617 PROPERFMTARGS(soft_max_capacity()), PROPERFMTARGS(committed()));
618 front_spacing = " ";
619 } else {
620 st->print_cr("Shenandoah Heap");
621 st->print_cr(" " PROPERFMT " max, " PROPERFMT " soft max, " PROPERFMT " committed, " PROPERFMT " used",
622 PROPERFMTARGS(max_capacity()),
623 PROPERFMTARGS(soft_max_capacity()),
624 PROPERFMTARGS(committed()),
625 PROPERFMTARGS(used())
626 );
627 }
628 st->print_cr("%s %zu x " PROPERFMT " regions",
629 front_spacing,
630 num_regions(),
631 PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()));
632
633 st->print("Status: ");
634 if (has_forwarded_objects()) st->print("has forwarded objects, ");
635 if (!is_generational) {
    if (is_concurrent_mark_in_progress()) st->print("marking, ");
637 } else {
638 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
639 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
640 }
641 if (is_evacuation_in_progress()) st->print("evacuating, ");
642 if (is_update_refs_in_progress()) st->print("updating refs, ");
643 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
644 if (is_full_gc_in_progress()) st->print("full gc, ");
645 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
646 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
647 if (is_concurrent_strong_root_in_progress() &&
648 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
649
650 if (cancelled_gc()) {
651 st->print("cancelled");
652 } else {
653 st->print("not cancelled");
654 }
655 st->cr();
656
657 st->print_cr("Reserved region:");
658 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
659 p2i(reserved_region().start()),
660 p2i(reserved_region().end()));
661
662 ShenandoahCollectionSet* cset = collection_set();
663 st->print_cr("Collection set:");
664 if (cset != nullptr) {
665 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
666 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
667 } else {
668 st->print_cr(" (null)");
669 }
670
671 st->cr();
672
673 if (Verbose) {
674 st->cr();
675 print_heap_regions_on(st);
676 }
677 }
678
679 void ShenandoahHeap::print_gc_on(outputStream* st) const {
680 print_heap_regions_on(st);
681 }
682
683 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
684 public:
685 void do_thread(Thread* thread) {
686 assert(thread != nullptr, "Sanity");
687 ShenandoahThreadLocalData::initialize_gclab(thread);
688 }
689 };
690
691 void ShenandoahHeap::initialize_generations() {
692 _global_generation->post_initialize(this);
693 }
694
// We do not call this explicitly; it is called by HotSpot infrastructure.
696 void ShenandoahHeap::post_initialize() {
697 CollectedHeap::post_initialize();
698
699 check_soft_max_changed();
700
701 // Schedule periodic task to report on gc thread CPU utilization
702 _mmu_tracker.initialize();
703
704 MutexLocker ml(Threads_lock);
705
706 ShenandoahInitWorkerGCLABClosure init_gclabs;
707 _workers->threads_do(&init_gclabs);
708
  // GCLABs cannot be initialized early during VM startup, because max_size cannot be
  // determined that early. Instead, let WorkerThreads initialize the GCLAB when each
  // new worker is created.
711 _workers->set_initialize_gclab();
712
713 // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
714 // during a concurrent evacuation phase.
715 if (_safepoint_workers != nullptr) {
716 _safepoint_workers->threads_do(&init_gclabs);
717 _safepoint_workers->set_initialize_gclab();
718 }
719
720 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
721 }
722
723 void ShenandoahHeap::post_initialize_heuristics() {
724 _global_generation->post_initialize_heuristics();
725 }
726
727 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
728 return _global_generation->heuristics();
729 }
730
731 size_t ShenandoahHeap::used() const {
732 return global_generation()->used();
733 }
734
735 size_t ShenandoahHeap::committed() const {
736 return _committed.load_relaxed();
737 }
738
739 void ShenandoahHeap::increase_committed(size_t bytes) {
740 shenandoah_assert_heaplocked_or_safepoint();
741 _committed.fetch_then_add(bytes, memory_order_relaxed);
742 }
743
744 void ShenandoahHeap::decrease_committed(size_t bytes) {
745 shenandoah_assert_heaplocked_or_safepoint();
746 _committed.fetch_then_sub(bytes, memory_order_relaxed);
747 }
748
749 size_t ShenandoahHeap::capacity() const {
750 return committed();
751 }
752
753 size_t ShenandoahHeap::max_capacity() const {
754 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
755 }
756
757 size_t ShenandoahHeap::soft_max_capacity() const {
758 size_t v = _soft_max_size.load_relaxed();
759 assert(min_capacity() <= v && v <= max_capacity(),
760 "Should be in bounds: %zu <= %zu <= %zu",
761 min_capacity(), v, max_capacity());
762 return v;
763 }
764
765 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
766 assert(min_capacity() <= v && v <= max_capacity(),
767 "Should be in bounds: %zu <= %zu <= %zu",
768 min_capacity(), v, max_capacity());
769 _soft_max_size.store_relaxed(v);
770 heuristics()->compute_headroom_adjustment();
771 }
772
773 size_t ShenandoahHeap::min_capacity() const {
774 return _minimum_size;
775 }
776
777 size_t ShenandoahHeap::initial_capacity() const {
778 return _initial_size;
779 }
780
781 bool ShenandoahHeap::is_in(const void* p) const {
782 if (!is_in_reserved(p)) {
783 return false;
784 }
785
786 if (is_full_gc_move_in_progress()) {
    // A Full GC move is running, so we do not have consistent region
    // information yet. But we know the pointer is in the heap.
789 return true;
790 }
791
  // Now check if we point to a live section in an active region.
793 const ShenandoahHeapRegion* r = heap_region_containing(p);
794 if (p >= r->top()) {
795 return false;
796 }
797
798 if (r->is_active()) {
799 return true;
800 }
801
802 // The region is trash, but won't be recycled until after concurrent weak
803 // roots. We also don't allow mutators to allocate from trash regions
804 // during weak roots. Concurrent class unloading may access unmarked oops
805 // in trash regions.
806 return r->is_trash() && is_concurrent_weak_root_in_progress();
807 }
808
809 void ShenandoahHeap::notify_soft_max_changed() {
810 if (_uncommit_thread != nullptr) {
811 _uncommit_thread->notify_soft_max_changed();
812 }
813 }
814
815 void ShenandoahHeap::notify_explicit_gc_requested() {
816 if (_uncommit_thread != nullptr) {
817 _uncommit_thread->notify_explicit_gc_requested();
818 }
819 }
820
821 bool ShenandoahHeap::check_soft_max_changed() {
822 size_t new_soft_max = AtomicAccess::load(&SoftMaxHeapSize);
823 size_t old_soft_max = soft_max_capacity();
824 if (new_soft_max != old_soft_max) {
825 new_soft_max = MAX2(min_capacity(), new_soft_max);
826 new_soft_max = MIN2(max_capacity(), new_soft_max);
827 if (new_soft_max != old_soft_max) {
828 log_info(gc)("Soft Max Heap Size: %zu%s -> %zu%s",
829 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
830 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
831 );
832 set_soft_max_capacity(new_soft_max);
833 return true;
834 }
835 }
836 return false;
837 }
838
839 void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
842 monitoring_support()->notify_heap_changed();
843 _heap_changed.try_set();
844 }
845
846 void ShenandoahHeap::start_idle_span() {
847 heuristics()->start_idle_span();
848 }
849
850 void ShenandoahHeap::set_forced_counters_update(bool value) {
851 monitoring_support()->set_forced_counters_update(value);
852 }
853
854 void ShenandoahHeap::handle_force_counters_update() {
855 monitoring_support()->handle_force_counters_update();
856 }
857
858 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
859 // New object should fit the GCLAB size
860 size_t min_size = MAX2(size, PLAB::min_size());
861
862 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
863 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
864
865 new_size = MIN2(new_size, PLAB::max_size());
866 new_size = MAX2(new_size, PLAB::min_size());
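  // For example, a thread whose previous GCLAB was 32K words will request a
  // 64K-word GCLAB next, subject to the PLAB min/max clamps above.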
867
  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
871 log_debug(gc, free)("Set new GCLAB size: %zu", new_size);
872 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
873
874 if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs when we encounter a large object.
877 log_debug(gc, free)("New gclab size (%zu) is too small for %zu", new_size, size);
878 return nullptr;
879 }
880
881 // Retire current GCLAB, and allocate a new one.
882 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
883 gclab->retire();
884
885 size_t actual_size = 0;
886 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
887 if (gclab_buf == nullptr) {
888 return nullptr;
889 }
890
891 assert (size <= actual_size, "allocation should fit");
892
  // ...and clear or zap the just-allocated GCLAB, if needed.
894 if (ZeroTLAB) {
895 Copy::zero_to_words(gclab_buf, actual_size);
896 } else if (ZapTLAB) {
897 // Skip mangling the space corresponding to the object header to
898 // ensure that the returned space is not considered parsable by
899 // any concurrent GC thread.
900 size_t hdr_size = oopDesc::header_size();
901 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
902 }
903 gclab->set_buf(gclab_buf, actual_size);
904 return gclab->allocate(size);
905 }
906
907 // Called from stubs in JIT code or interpreter
908 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
909 size_t requested_size,
910 size_t* actual_size) {
911 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
912 HeapWord* res = allocate_memory(req);
913 if (res != nullptr) {
914 *actual_size = req.actual_size();
915 } else {
916 *actual_size = 0;
917 }
918 return res;
919 }
920
921 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
922 size_t word_size,
923 size_t* actual_size) {
924 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
925 HeapWord* res = allocate_memory(req);
926 if (res != nullptr) {
927 *actual_size = req.actual_size();
928 } else {
929 *actual_size = 0;
930 }
931 return res;
932 }
933
934 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
935 bool in_new_region = false;
936 HeapWord* result = nullptr;
937
938 if (req.is_mutator_alloc()) {
939
940 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
941 result = allocate_memory_under_lock(req, in_new_region);
942 }
943
944 // Check that gc overhead is not exceeded.
945 //
946 // Shenandoah will grind along for quite a while allocating one
947 // object at a time using shared (non-tlab) allocations. This check
948 // is testing that the GC overhead limit has not been exceeded.
949 // This will notify the collector to start a cycle, but will raise
    // an OOME to the mutator if recent GCs have not made progress.
951 // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
952 if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
953 control_thread()->handle_alloc_failure(req, false);
954 req.set_actual_size(0);
955 return nullptr;
956 }
957
958 if (result == nullptr) {
959 // Block until control thread reacted, then retry allocation.
960 //
      // It might happen that a thread requesting an allocation unblocks well after
      // the GC happened, only to fail the second allocation because other threads
      // have already depleted the free storage. In this case, a better strategy
      // is to try again, until at least one full GC has completed.
965 //
      // Stop retrying and return nullptr to raise an OOME if our allocation failed even after:
967 // a) We experienced a GC that had good progress, or
968 // b) We experienced at least one Full GC (whether or not it had good progress)
969
970 const size_t original_count = shenandoah_policy()->full_gc_count();
971 while (result == nullptr && should_retry_allocation(original_count)) {
972 control_thread()->handle_alloc_failure(req, true);
973 result = allocate_memory_under_lock(req, in_new_region);
974 }
975 if (result != nullptr) {
976 // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
977 notify_gc_progress();
978 }
979 if (log_develop_is_enabled(Debug, gc, alloc)) {
980 ResourceMark rm;
981 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: %zu"
982 ", Original: %zu, Latest: %zu",
983 Thread::current()->name(), p2i(result), req.type_string(), req.size(),
984 original_count, get_gc_no_progress_count());
985 }
986 }
987 } else {
988 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
989 result = allocate_memory_under_lock(req, in_new_region);
990 // Do not call handle_alloc_failure() here, because we cannot block.
991 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
992 }
993
994 if (in_new_region) {
995 notify_heap_changed();
996 }
997
998 if (result == nullptr) {
999 req.set_actual_size(0);
1000 }
1001
1002 if (result != nullptr) {
1003 size_t requested = req.size();
1004 size_t actual = req.actual_size();
1005
1006 assert (req.is_lab_alloc() || (requested == actual),
1007 "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
1008 req.type_string(), requested, actual);
1009 }
1010
1011 return result;
1012 }
1013
1014 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
1015 return shenandoah_policy()->full_gc_count() == original_full_gc_count
1016 && !shenandoah_policy()->is_at_shutdown();
1017 }
1018
1019 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1020 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1021 // We cannot block for safepoint for GC allocations, because there is a high chance
1022 // we are already running at safepoint or from stack watermark machinery, and we cannot
1023 // block again.
1024 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1025
1026 // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1027 if (req.is_old() && !old_generation()->can_allocate(req)) {
1028 return nullptr;
1029 }
1030
  // If the TLAB request size is greater than the memory available, allocate() will
  // attempt to downsize the request to fit within available memory.
1033 HeapWord* result = _free_set->allocate(req, in_new_region);
1034
1035 // Record the plab configuration for this result and register the object.
1036 if (result != nullptr && req.is_old()) {
1037 if (req.is_lab_alloc()) {
1038 old_generation()->configure_plab_for_current_thread(req);
1039 } else {
        // Register the newly allocated object while we're holding the global lock, since there's no synchronization
        // built into the implementation of register_object(). There are potential races when multiple independent
1042 // threads are allocating objects, some of which might span the same card region. For example, consider
1043 // a card table's memory region within which three objects are being allocated by three different threads:
1044 //
1045 // objects being "concurrently" allocated:
1046 // [-----a------][-----b-----][--------------c------------------]
1047 // [---- card table memory range --------------]
1048 //
1049 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1050 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1051 // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1052 // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1053 // card region.
1054 //
1055 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1056 // last-start representing object b while first-start represents object c. This is why we need to require all
1057 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1058 old_generation()->card_scan()->register_object(result);
1059
1060 if (req.is_promotion()) {
1061 // Shared promotion.
1062 const size_t actual_size = req.actual_size() * HeapWordSize;
1063 log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size);
1064 old_generation()->expend_promoted(actual_size);
1065 }
1066 }
1067 }
1068
1069 return result;
1070 }
1071
1072 HeapWord* ShenandoahHeap::mem_allocate(size_t size) {
1073 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1074 return allocate_memory(req);
1075 }
1076
1077 oop ShenandoahHeap::array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS) {
1078 ShenandoahObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
1079 return allocator.allocate();
1080 }
1081
1082 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1083 size_t size,
1084 Metaspace::MetadataType mdtype) {
1085 MetaWord* result;
1086
1087 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1088 ShenandoahHeuristics* h = global_generation()->heuristics();
1089 if (h->can_unload_classes()) {
1090 h->record_metaspace_oom();
1091 }
1092
1093 // Expand and retry allocation
1094 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1095 if (result != nullptr) {
1096 return result;
1097 }
1098
1099 // Start full GC
1100 collect(GCCause::_metadata_GC_clear_soft_refs);
1101
1102 // Retry allocation
1103 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1104 if (result != nullptr) {
1105 return result;
1106 }
1107
1108 // Expand and retry allocation
1109 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1110 if (result != nullptr) {
1111 return result;
1112 }
1113
1114 // Out of memory
1115 return nullptr;
1116 }
1117
1118 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1119 private:
1120 ShenandoahHeap* const _heap;
1121 Thread* const _thread;
1122 public:
1123 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1124 _heap(heap), _thread(Thread::current()) {}
1125
1126 void do_object(oop p) {
1127 shenandoah_assert_marked(nullptr, p);
1128 if (!p->is_forwarded()) {
1129 _heap->evacuate_object(p, _thread);
1130 }
1131 }
1132 };
1133
1134 class ShenandoahEvacuationTask : public WorkerTask {
1135 private:
1136 ShenandoahHeap* const _sh;
1137 ShenandoahCollectionSet* const _cs;
1138 bool _concurrent;
1139 public:
1140 ShenandoahEvacuationTask(ShenandoahHeap* sh,
1141 ShenandoahCollectionSet* cs,
1142 bool concurrent) :
1143 WorkerTask("Shenandoah Evacuation"),
1144 _sh(sh),
1145 _cs(cs),
1146 _concurrent(concurrent)
1147 {}
1148
1149 void work(uint worker_id) {
1150 if (_concurrent) {
1151 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1152 SuspendibleThreadSetJoiner stsj;
1153 do_work();
1154 } else {
1155 ShenandoahParallelWorkerSession worker_session(worker_id);
1156 do_work();
1157 }
1158 }
1159
1160 private:
1161 void do_work() {
1162 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1163 ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
1165 assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
1166 _sh->marked_object_iterate(r, &cl);
1167
1168 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1169 break;
1170 }
1171 }
1172 }
1173 };
1174
1175 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1176 private:
1177 bool const _resize;
1178 public:
1179 explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1180 void do_thread(Thread* thread) override {
1181 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1182 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1183 gclab->retire();
1184 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1185 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1186 }
1187
1188 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1189 ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread);
1190 assert(shenandoah_plab != nullptr, "PLAB should be initialized for %s", thread->name());
1191
      // There are two reasons to retire all plabs between old-gen evacuation passes.
      //  1. We need to make the plab memory parsable by remembered-set scanning.
      //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1195 shenandoah_plab->retire();
1196
1197 // Re-enable promotions for the next evacuation phase.
1198 shenandoah_plab->enable_promotions();
1199
1200 // Reset the fill size for next evacuation phase.
1201 if (_resize && shenandoah_plab->desired_size() > 0) {
1202 shenandoah_plab->set_desired_size(0);
1203 }
1204 }
1205 }
1206 };
1207
1208 class ShenandoahGCStatePropagatorHandshakeClosure : public HandshakeClosure {
1209 public:
1210 explicit ShenandoahGCStatePropagatorHandshakeClosure(char gc_state) :
1211 HandshakeClosure("Shenandoah GC State Change"),
1212 _gc_state(gc_state) {}
1213
1214 void do_thread(Thread* thread) override {
1215 ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1216 }
1217 private:
1218 char _gc_state;
1219 };
1220
1221 class ShenandoahPrepareForUpdateRefsHandshakeClosure : public HandshakeClosure {
1222 public:
1223 explicit ShenandoahPrepareForUpdateRefsHandshakeClosure(char gc_state) :
1224 HandshakeClosure("Shenandoah Prepare for Update Refs"),
1225 _retire(ResizeTLAB), _propagator(gc_state) {}
1226
1227 void do_thread(Thread* thread) override {
1228 _propagator.do_thread(thread);
1229 if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1230 _retire.do_thread(thread);
1231 }
1232 }
1233 private:
1234 ShenandoahRetireGCLABClosure _retire;
1235 ShenandoahGCStatePropagatorHandshakeClosure _propagator;
1236 };
1237
1238 void ShenandoahHeap::evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) {
1239 assert(generation->is_global(), "Only global generation expected here");
1240 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1241 workers()->run_task(&task);
1242 }
1243
1244 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1245 {
    // Java threads take this lock while they are being attached and added to the list of threads.
    // If another thread acquires this lock before we update the gc state, it will observe a stale
    // gc state, but since it has been added to the list of Java threads, it will be corrected by
    // the following handshake.
1250 MutexLocker lock(Threads_lock);
1251
1252 // A cancellation at this point means the degenerated cycle must resume from update-refs.
1253 set_gc_state_concurrent(EVACUATION, false);
1254 set_gc_state_concurrent(WEAK_ROOTS, false);
1255 set_gc_state_concurrent(UPDATE_REFS, true);
1256 }
1257
1258 // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1259 ShenandoahPrepareForUpdateRefsHandshakeClosure prepare_for_update_refs(_gc_state.raw_value());
1260
1261 // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1262 Threads::non_java_threads_do(&prepare_for_update_refs);
1263
1264 // Now retire gclabs and plabs and propagate gc_state for mutator threads
1265 Handshake::execute(&prepare_for_update_refs);
1266
1267 _update_refs_iterator.reset();
1268 }
1269
1270 class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
1271 HandshakeClosure* _handshake_1;
1272 HandshakeClosure* _handshake_2;
1273 public:
1274 ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
1275 HandshakeClosure(handshake_2->name()),
1276 _handshake_1(handshake_1), _handshake_2(handshake_2) {}
1277
1278 void do_thread(Thread* thread) override {
1279 _handshake_1->do_thread(thread);
1280 _handshake_2->do_thread(thread);
1281 }
1282 };
1283
1284 void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
1285 {
1286 assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
1287 MutexLocker lock(Threads_lock);
1288 set_gc_state_concurrent(WEAK_ROOTS, false);
1289 }
1290
1291 ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
1292 Threads::non_java_threads_do(&propagator);
1293 if (handshake_closure == nullptr) {
1294 Handshake::execute(&propagator);
1295 } else {
1296 ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
1297 Handshake::execute(&composite);
1298 }
1299 }
1300
1301 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1302 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1303
1304 ShenandoahHeapRegion* r = heap_region_containing(p);
1305 assert(!r->is_humongous(), "never evacuate humongous objects");
1306
1307 ShenandoahAffiliation target_gen = r->affiliation();
1308 return try_evacuate_object(p, thread, r, target_gen);
1309 }
1310
1311 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1312 ShenandoahAffiliation target_gen) {
1313 assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1314 assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1315 bool alloc_from_lab = true;
1316 HeapWord* copy = nullptr;
1317 size_t size = ShenandoahForwarding::size(p);
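  // ShenandoahForwarding::size(p) is used here (rather than p->size()) since the mark
  // word may already hold a forwarding pointer installed by a competing evacuator;
  // the forwarding-aware accessor handles that case.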
1318
1319 #ifdef ASSERT
1320 if (ShenandoahOOMDuringEvacALot &&
1321 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1322 copy = nullptr;
1323 } else {
1324 #endif
1325 if (UseTLAB) {
1326 copy = allocate_from_gclab(thread, size);
1327 }
1328 if (copy == nullptr) {
1329 // If we failed to allocate in LAB, we'll try a shared allocation.
1330 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1331 copy = allocate_memory(req);
1332 alloc_from_lab = false;
1333 }
1334 #ifdef ASSERT
1335 }
1336 #endif
1337
1338 if (copy == nullptr) {
1339 control_thread()->handle_alloc_failure_evac(size);
1340
1341 // Install the self-forwarded bit on p so other evacuators/LRBs see
1342 // the object as "already handled, do not try to evacuate". The CAS
1343 // may fail if another thread concurrently installed a real forwardee
1344 // (they succeeded where we failed) or self-forwarded first.
1345 markWord old_mark = p->mark();
1346 if (old_mark.is_forwarded()) {
1347 return ShenandoahForwarding::get_forwardee(p);
1348 }
1349 oop winner = ShenandoahForwarding::try_forward_to_self(p, old_mark);
1350 if (winner == nullptr) {
1351 // We own the self-forwarding. Flag the region so the degen/full GC
1352 // entry drain knows to scan it for self_fwd bits to clear.
1353 from_region->set_has_self_forwards();
1354 return p;
1355 }
1356 return winner;
1357 }
1358
1359 if (ShenandoahEvacTracking) {
1360 evac_tracker()->begin_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
1361 }
1362
1363 // Copy the object:
1364 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1365
1366 oop copy_val = cast_to_oop(copy);
1367
1368 // Relativize stack chunks before publishing the copy. After the forwarding CAS,
1369 // mutators can see the copy and thaw it via the fast path if flags == 0. We must
1370 // relativize derived pointers and set gc_mode before that happens. Skip if the
1371 // copy's mark word is already a forwarding pointer (another thread won the race
1372 // and overwrote the original's header before we copied it).
1373 if (!ShenandoahForwarding::is_forwarded(copy_val)) {
1374 ContinuationGCSupport::relativize_stack_chunk(copy_val);
1375 }
1376
1377 // Try to install the new forwarding pointer.
1378 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1379 if (result == copy_val) {
1380 // Successfully evacuated. Our copy is now the public one!
1381 shenandoah_assert_correct(nullptr, copy_val);
1382 if (ShenandoahEvacTracking) {
1383 evac_tracker()->end_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
1384 }
1385 return copy_val;
1386 } else {
1387 // Failed to evacuate. We need to deal with the object that is left behind. Since this
1388 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1389 // But if it happens to contain references to evacuated regions, those references would
1390 // not get updated for this stale copy during this cycle, and we will crash while scanning
1391 // it the next cycle.
1392 if (alloc_from_lab) {
1393 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1394 // object will overwrite this stale copy, or the filler object on LAB retirement will
1395 // do this.
1396 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1397 } else {
1398 // For non-LAB allocations, we have no way to retract the allocation, and
1399 // have to explicitly overwrite the copy with the filler object. With that overwrite,
1400 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1401 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1402 fill_with_object(copy, size);
1403 shenandoah_assert_correct(nullptr, copy_val);
1404 // For non-LAB allocations, the object has already been registered
1405 }
1406 shenandoah_assert_correct(nullptr, result);
1407 return result;
1408 }
1409 }
1410
// Clear the self_fwd bit on a live cset object, if set. Runs at a safepoint,
// so a plain store is sufficient: there are no concurrent writers to the mark word.
1413 class ShenandoahUnSelfForwardObjectClosure : public ObjectClosure {
1414 public:
1415 void do_object(oop obj) override {
1416 markWord m = obj->mark();
1417 if (m.is_self_forwarded()) {
1418 obj->set_mark(m.unset_self_forwarded());
1419 }
1420 }
1421 };
1422
1423 // Parallel task over flagged cset regions. Iterates the live objects via the
1424 // mark bitmap (skipping evacuated and never-marked memory), clears self_fwd
1425 // bits, and resets the region flag once done.
1426 class ShenandoahUnSelfForwardTask : public WorkerTask {
1427 private:
1428 ShenandoahHeap* const _heap;
1429 ShenandoahCollectionSet* const _cs;
1430
1431 public:
1432 ShenandoahUnSelfForwardTask(ShenandoahHeap* heap, ShenandoahCollectionSet* cs) :
1433 WorkerTask("Shenandoah Un-Self-Forward"),
1434 _heap(heap),
1435 _cs(cs) {}
1436
1437 void work(uint worker_id) override {
1438 ShenandoahParallelWorkerSession worker_session(worker_id);
1439 ShenandoahUnSelfForwardObjectClosure cl;
1440 ShenandoahHeapRegion* r;
1441 while ((r = _cs->claim_next()) != nullptr) {
1442 if (r->has_self_forwards()) {
1443 _heap->marked_object_iterate(r, &cl);
1444 r->clear_has_self_forwards();
1445 }
1446 }
1447 }
1448 };
1449
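// Safepoint-time drain for failed evacuations: clears the self-forwarded mark words
// left behind by the evacuation path above, so subsequent degenerated/full GC phases
// see clean headers. Only regions flagged via set_has_self_forwards() need scanning.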
1450 void ShenandoahHeap::un_self_forward_cset_regions() {
1451 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1452 ShenandoahCollectionSet* cs = collection_set();
1453 if (cs == nullptr || cs->is_empty()) {
1454 return;
1455 }
1456 cs->clear_current_index();
1457 ShenandoahUnSelfForwardTask task(this, cs);
1458 workers()->run_task(&task);
1459 DEBUG_ONLY(assert_no_self_forwards());
1460 }
1461
1462 #ifdef ASSERT
1463 void ShenandoahHeap::assert_no_self_forwards() const {
1464 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1465 ShenandoahCollectionSet* cs = collection_set();
1466 if (cs == nullptr) return;
1467 cs->clear_current_index();
1468 ShenandoahHeapRegion* r;
1469 while ((r = cs->next()) != nullptr) {
1470 assert(!r->has_self_forwards(), "region still flagged after drain");
1471 }
1472 cs->clear_current_index();
1473 }
1474 #endif
1475
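// Turn every collection set region into trash and drop the collection set. Trashed
// regions become eligible for recycling into fresh allocation regions later; see
// recycle_trash().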
1476 void ShenandoahHeap::trash_cset_regions() {
1477 ShenandoahHeapLocker locker(lock());
1478
1479 ShenandoahCollectionSet* set = collection_set();
1480 ShenandoahHeapRegion* r;
1481 set->clear_current_index();
1482 while ((r = set->next()) != nullptr) {
1483 r->make_trash();
1484 }
  set->clear();
1486 }
1487
1488 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1489 st->print_cr("Heap Regions:");
1490 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1491 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1492 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1493 st->print_cr("UWM=update watermark, U=used");
1494 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1495 st->print_cr("S=shared allocs, L=live data");
1496 st->print_cr("CP=critical pins");
1497
1498 for (size_t i = 0; i < num_regions(); i++) {
1499 get_region(i)->print_on(st);
1500 }
1501 }
1502
1503 void ShenandoahHeap::process_gc_stats() const {
1504 // Commit worker statistics to cycle data
1505 phase_timings()->flush_par_workers_to_cycle();
1506
1507 // Print GC stats for current cycle
1508 LogTarget(Info, gc, stats) lt;
1509 if (lt.is_enabled()) {
1510 ResourceMark rm;
1511 LogStream ls(lt);
1512 phase_timings()->print_cycle_on(&ls);
1513 if (ShenandoahEvacTracking) {
1514 ShenandoahCycleStats evac_stats = evac_tracker()->flush_cycle_to_global();
1515 evac_tracker()->print_evacuations_on(&ls, &evac_stats.workers,
1516 &evac_stats.mutators);
1517 }
1518 }
1519
1520 // Commit statistics to globals
1521 phase_timings()->flush_cycle_to_global();
1522 }
1523
1524 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
1525 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1526 assert(!start->has_live(), "liveness must be zero");
1527
1528 // Do not try to get the size of this humongous object. STW collections will
1529 // have already unloaded classes, so an unmarked object may have a bad klass pointer.
1530 ShenandoahHeapRegion* region = start;
1531 size_t index = region->index();
1532 do {
1533 assert(region->is_humongous(), "Expect correct humongous start or continuation");
1534 assert(!region->is_cset(), "Humongous region should not be in collection set");
1535 region->make_trash_immediate();
1536 region = get_region(++index);
1537 } while (region != nullptr && region->is_humongous_continuation());
1538
1539 // Return number of regions trashed
1540 return index - start->index();
1541 }
1542
1543 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1544 public:
1545 ShenandoahCheckCleanGCLABClosure() {}
1546 void do_thread(Thread* thread) {
1547 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1548 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1549 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1550
1551 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1552 ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread);
1553 assert(shenandoah_plab != nullptr, "PLAB should be initialized for %s", thread->name());
1554 assert(shenandoah_plab->plab()->words_remaining() == 0, "PLAB should not need retirement");
1555 }
1556 }
1557 };
1558
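// Make all allocation buffers parsable: fill the unused tail of every mutator TLAB
// (additionally retiring it when ZeroTLAB is set), and retire the GCLABs of mutator,
// GC worker, and safepoint worker threads.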
1559 void ShenandoahHeap::labs_make_parsable() {
1560 assert(UseTLAB, "Only call with UseTLAB");
1561
1562 ShenandoahRetireGCLABClosure cl(false);
1563
1564 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1565 ThreadLocalAllocBuffer& tlab = t->tlab();
1566 tlab.make_parsable();
1567 if (ZeroTLAB) {
1568 t->retire_tlab();
1569 }
1570 cl.do_thread(t);
1571 }
1572
1573 workers()->threads_do(&cl);
1574
1575 if (safepoint_workers() != nullptr) {
1576 safepoint_workers()->threads_do(&cl);
1577 }
1578 }
1579
1580 void ShenandoahHeap::tlabs_retire(bool resize) {
1581 assert(UseTLAB, "Only call with UseTLAB");
1582 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1583
1584 ThreadLocalAllocStats stats;
1585
1586 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1587 t->retire_tlab(&stats);
1588 if (resize) {
1589 t->tlab().resize();
1590 }
1591 }
1592
1593 stats.publish();
1594
1595 #ifdef ASSERT
1596 ShenandoahCheckCleanGCLABClosure cl;
1597 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1598 cl.do_thread(t);
1599 }
1600 workers()->threads_do(&cl);
1601 #endif
1602 }
1603
1604 void ShenandoahHeap::gclabs_retire(bool resize) {
1605 assert(UseTLAB, "Only call with UseTLAB");
1606 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1607
1608 ShenandoahRetireGCLABClosure cl(resize);
1609 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1610 cl.do_thread(t);
1611 }
1612
1613 workers()->threads_do(&cl);
1614
1615 if (safepoint_workers() != nullptr) {
1616 safepoint_workers()->threads_do(&cl);
1617 }
1618 }
1619
1620 // Returns size in bytes
1621 size_t ShenandoahHeap::unsafe_max_tlab_alloc() const {
1622 // Return the max allowed size, and let the allocation path
1623 // figure out the safe size for current allocation.
1624 return ShenandoahHeapRegion::max_tlab_size_bytes();
1625 }
1626
1627 size_t ShenandoahHeap::max_tlab_size() const {
1628 // Returns size in words
1629 return ShenandoahHeapRegion::max_tlab_size_words();
1630 }
1631
1632 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1633 // These requests are ignored because we can't easily have Shenandoah jump into
1634 // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1635 // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1636 // on the VM thread, but this would confuse the control thread mightily and doesn't
1637 // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1638 // concurrent cycle in the prologue of the heap inspect/dump operation (see VM_HeapDumper::doit_prologue).
1639 // This is how other concurrent collectors in the JVM handle this scenario as well.
1640 assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1641 guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1642 }
1643
1644 void ShenandoahHeap::collect(GCCause::Cause cause) {
1645 control_thread()->request_gc(cause);
1646 }
1647
1648 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1649 // This method is only called by `CollectedHeap::collect_as_vm_thread`, which we have
1650 // overridden to do nothing. See the comment there for an explanation of how heap inspections
1651 // work for Shenandoah.
1652 ShouldNotReachHere();
1653 }
1654
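// Block API support: return the start of the block/object covering addr, or null
// when no region covers the address.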
1655 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1656 ShenandoahHeapRegion* r = heap_region_containing(addr);
1657 if (r != nullptr) {
1658 return r->block_start(addr);
1659 }
1660 return nullptr;
1661 }
1662
1663 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1664 ShenandoahHeapRegion* r = heap_region_containing(addr);
1665 return r->block_is_obj(addr);
1666 }
1667
1668 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1669 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1670 }
1671
1672 void ShenandoahHeap::prepare_for_verify() {
1673 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1674 labs_make_parsable();
1675 }
1676 }
1677
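// Visit all GC threads: the control thread, the uncommit thread, the parallel workers,
// and the safepoint workers. Skipped entirely once the policy has recorded shutdown,
// when these threads may already be terminating.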
1678 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1679 if (_shenandoah_policy->is_at_shutdown()) {
1680 return;
1681 }
1682
1683 if (_control_thread != nullptr) {
1684 tcl->do_thread(_control_thread);
1685 }
1686
1687 if (_uncommit_thread != nullptr) {
1688 tcl->do_thread(_uncommit_thread);
1689 }
1690
1691 workers()->threads_do(tcl);
1692 if (_safepoint_workers != nullptr) {
1693 _safepoint_workers->threads_do(tcl);
1694 }
1695 }
1696
1697 void ShenandoahHeap::print_tracing_info() const {
1698 LogTarget(Info, gc, stats) lt;
1699 if (lt.is_enabled()) {
1700 ResourceMark rm;
1701 LogStream ls(lt);
1702
1703 if (ShenandoahEvacTracking) {
1704 evac_tracker()->print_global_on(&ls);
1705 ls.cr();
1706 ls.cr();
1707 }
1708
1709 phase_timings()->print_global_on(&ls);
1710
1711 ls.cr();
1712 ls.cr();
1713
1714 shenandoah_policy()->print_gc_stats(&ls);
1715
1716 ls.cr();
1717 ls.cr();
1718 }
1719 }
1720
1721 // Active generation may only be set by the VM thread at a safepoint.
1722 void ShenandoahHeap::set_active_generation(ShenandoahGeneration* generation) {
1723 assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1724 assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1725 _active_generation = generation;
1726 }
1727
1728 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation,
1729 bool is_degenerated, bool is_out_of_cycle) {
1730 shenandoah_policy()->record_collection_cause(cause);
1731
1732 const GCCause::Cause current = gc_cause();
1733 assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1734 GCCause::to_string(current), GCCause::to_string(cause));
1735
1736 set_gc_cause(cause);
1737
1738 if (is_degenerated) {
1739 generation->heuristics()->record_degenerated_cycle_start(is_out_of_cycle);
1740 } else {
1741 generation->heuristics()->record_cycle_start();
1742 }
1743 }
1744
1745 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1746 assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1747
1748 generation->heuristics()->record_cycle_end();
1749 if (mode()->is_generational() && generation->is_global()) {
1750 // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1751 young_generation()->heuristics()->record_cycle_end();
1752 old_generation()->heuristics()->record_cycle_end();
1753 }
1754
1755 set_gc_cause(GCCause::_no_gc);
1756 }
1757
1758 void ShenandoahHeap::verify(VerifyOption vo) {
1759 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1760 if (ShenandoahVerify) {
1761 verifier()->verify_generic(active_generation(), vo);
1762 } else {
1763 // TODO: Consider allocating verification bitmaps on demand,
1764 // and turn this on unconditionally.
1765 }
1766 }
1767 }

size_t ShenandoahHeap::tlab_capacity() const {
1769 return _free_set->capacity_not_holding_lock();
1770 }
1771
1772 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1773 private:
1774 MarkBitMap* _bitmap;
1775 ShenandoahScanObjectStack* _oop_stack;
1776 ShenandoahHeap* const _heap;
1777 ShenandoahMarkingContext* const _marking_context;
1778
1779 template <class T>
1780 void do_oop_work(T* p) {
1781 T o = RawAccess<>::oop_load(p);
1782 if (!CompressedOops::is_null(o)) {
1783 oop obj = CompressedOops::decode_not_null(o);
1784 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1785 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1786 return;
1787 }
1788 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1789
1790 assert(oopDesc::is_oop(obj), "must be a valid oop");
1791 if (!_bitmap->is_marked(obj)) {
1792 _bitmap->mark(obj);
1793 _oop_stack->push(obj);
1794 }
1795 }
1796 }
1797 public:
1798 ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1799 _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1800 _marking_context(_heap->marking_context()) {}
1801 void do_oop(oop* p) { do_oop_work(p); }
1802 void do_oop(narrowOop* p) { do_oop_work(p); }
1803 };
1804
1805 /*
 * This is public API, used in preparation for object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see the comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans that
 * we can control, we call SH::tlabs_retire, SH::gclabs_retire.
1810 */
1811 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1812 // No-op.
1813 }
1814
1815 /*
1816 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1817 *
1818 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1819 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1820 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1821 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1822 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1823 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1824 * wiped the bitmap in preparation for next marking).
1825 *
1826 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1827 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1828 * is allowed to report dead objects, but is not required to do so.
1829 */
1830 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1831 // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }
1834
1835 ShenandoahScanObjectStack oop_stack;
1836 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1837 // Seed the stack with root scan
1838 scan_roots_for_iteration(&oop_stack, &oops);
1839
1840 // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
1842 oop obj = oop_stack.pop();
1843 assert(oopDesc::is_oop(obj), "must be a valid oop");
1844 cl->do_object(obj);
1845 obj->oop_iterate(&oops);
1846 }
1847
1848 assert(oop_stack.is_empty(), "should be empty");
1849 // Reclaim bitmap
1850 reclaim_aux_bitmap_for_iteration();
1851 }
1852
1853 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1854 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1855 if (!_aux_bitmap_region_special) {
1856 bool success = os::commit_memory((char *) _aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false);
1857 if (!success) {
1858 log_warning(gc)("Auxiliary marking bitmap commit failed: " PTR_FORMAT " (%zu bytes)",
1859 p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1860 return false;
1861 }
1862 }
1863 _aux_bit_map.clear();
1864 return true;
1865 }
1866
1867 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
1872 uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1873 ShenandoahHeapIterationRootScanner rp(n_workers);
1874 rp.roots_do(oops);
1875 }
1876
1877 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1878 if (!_aux_bitmap_region_special) {
1879 os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
1880 }
1881 }
1882
// Closure for scanning oops during parallel object iteration.
1884 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1885 private:
1886 MarkBitMap* _bitmap;
1887 ShenandoahObjToScanQueue* _queue;
1888 ShenandoahHeap* const _heap;
1889 ShenandoahMarkingContext* const _marking_context;
1890
1891 template <class T>
1892 void do_oop_work(T* p) {
1893 T o = RawAccess<>::oop_load(p);
1894 if (!CompressedOops::is_null(o)) {
1895 oop obj = CompressedOops::decode_not_null(o);
1896 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1897 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1898 return;
1899 }
1900 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1901
1902 assert(oopDesc::is_oop(obj), "Must be a valid oop");
1903 if (_bitmap->par_mark(obj)) {
1904 _queue->push(ShenandoahMarkTask(obj));
1905 }
1906 }
1907 }
1908 public:
1909 ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1910 _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1911 _marking_context(_heap->marking_context()) {}
1912 void do_oop(oop* p) { do_oop_work(p); }
1913 void do_oop(narrowOop* p) { do_oop_work(p); }
1914 };
1915
// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as preparation for the
// parallel marking queues.
// Every worker processes its own marking queue. Work-stealing is used
// to balance the workload.
1921 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1922 private:
1923 uint _num_workers;
1924 bool _init_ready;
1925 MarkBitMap* _aux_bit_map;
1926 ShenandoahHeap* _heap;
1927 ShenandoahScanObjectStack _roots_stack; // global roots stack
1928 ShenandoahObjToScanQueueSet* _task_queues;
1929 public:
  ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
    _num_workers(num_workers),
    _init_ready(false),
    _aux_bit_map(bitmap),
    _heap(ShenandoahHeap::heap()),
    // Initialize eagerly: the destructor checks _task_queues even when
    // construction bails out before prepare_worker_queues() runs.
    _task_queues(nullptr) {
1935 // Initialize bitmap
1936 _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1937 if (!_init_ready) {
1938 return;
1939 }
1940
1941 ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1942 _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1943
1944 _init_ready = prepare_worker_queues();
1945 }
1946
1947 ~ShenandoahParallelObjectIterator() {
1948 // Reclaim bitmap
1949 _heap->reclaim_aux_bitmap_for_iteration();
    // Reclaim the worker queues
    if (_task_queues != nullptr) {
1952 for (uint i = 0; i < _num_workers; ++i) {
1953 ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1954 if (q != nullptr) {
1955 delete q;
1956 _task_queues->register_queue(i, nullptr);
1957 }
1958 }
1959 delete _task_queues;
1960 _task_queues = nullptr;
1961 }
1962 }
1963
1964 virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1965 if (_init_ready) {
1966 object_iterate_parallel(cl, worker_id, _task_queues);
1967 }
1968 }
1969
1970 private:
  // Divide the global roots stack into per-worker queues
1972 bool prepare_worker_queues() {
1973 _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
1975 for (uint i = 0; i < _num_workers; ++i) {
1976 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1977 _task_queues->register_queue(i, task_queue);
1978 }
    // Divide roots among the workers. Assume that the distribution of object references
    // correlates with root kind; use round-robin so that every worker has the same chance
    // to process every kind of root.
1982 size_t roots_num = _roots_stack.size();
1983 if (roots_num == 0) {
1984 // No work to do
1985 return false;
1986 }
1987
1988 for (uint j = 0; j < roots_num; j++) {
1989 uint stack_id = j % _num_workers;
1990 oop obj = _roots_stack.pop();
1991 _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1992 }
1993 return true;
1994 }
1995
1996 void object_iterate_parallel(ObjectClosure* cl,
1997 uint worker_id,
1998 ShenandoahObjToScanQueueSet* queue_set) {
1999 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
2000 assert(queue_set != nullptr, "task queue must not be null");
2001
2002 ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
2003 assert(q != nullptr, "object iterate queue must not be null");
2004
2005 ShenandoahMarkTask t;
2006 ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
2007
    // Work through the queue to traverse the heap.
    // Steal from other queues when the local queue runs dry.
2010 while (q->pop(t) || queue_set->steal(worker_id, t)) {
2011 oop obj = t.obj();
2012 assert(oopDesc::is_oop(obj), "must be a valid oop");
2013 cl->do_object(obj);
2014 obj->oop_iterate(&oops);
2015 }
2016 assert(q->is_empty(), "should be empty");
2017 }
2018 };
2019
2020 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
2021 return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
2022 }
2023
2024 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
2025 void ShenandoahHeap::keep_alive(oop obj) {
2026 if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
2027 ShenandoahBarrierSet::barrier_set()->enqueue(obj);
2028 }
2029 }
2030
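// Apply the closure to every region in index order, on the calling thread. See
// parallel_heap_region_iterate() for the multi-threaded variant.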
2031 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2032 for (size_t i = 0; i < num_regions(); i++) {
2033 ShenandoahHeapRegion* current = get_region(i);
2034 blk->heap_region_do(current);
2035 }
2036 }
2037
2038 class ShenandoahHeapRegionIteratorTask : public WorkerTask {
2039 private:
2040 ShenandoahRegionIterator _regions;
2041 ShenandoahHeapRegionClosure* _closure;
2042
2043 public:
2044 ShenandoahHeapRegionIteratorTask(ShenandoahHeapRegionClosure* closure)
2045 : WorkerTask("Shenandoah Heap Region Iterator")
2046 , _closure(closure) {}
2047
2048 void work(uint worker_id) override {
2049 ShenandoahParallelWorkerSession worker_session(worker_id);
2050 ShenandoahHeapRegion* region = _regions.next();
2051 while (region != nullptr) {
2052 _closure->heap_region_do(region);
2053 region = _regions.next();
2054 }
2055 }
2056 };
2057
2058 class ShenandoahParallelHeapRegionTask : public WorkerTask {
2059 private:
2060 ShenandoahHeap* const _heap;
2061 ShenandoahHeapRegionClosure* const _blk;
2062 size_t const _stride;
2063
2064 shenandoah_padding(0);
2065 Atomic<size_t> _index;
2066 shenandoah_padding(1);
2067
2068 public:
2069 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
2070 WorkerTask("Shenandoah Parallel Region Operation"),
2071 _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
2072
2073 void work(uint worker_id) {
2074 ShenandoahParallelWorkerSession worker_session(worker_id);
2075 size_t stride = _stride;
2076
2077 size_t max = _heap->num_regions();
2078 while (_index.load_relaxed() < max) {
      size_t cur = _index.fetch_then_add(stride, memory_order_relaxed);
      if (cur >= max) break;
      size_t end = MIN2(cur + stride, max);
2083
2084 for (size_t i = cur; i < end; i++) {
2085 ShenandoahHeapRegion* current = _heap->get_region(i);
2086 _blk->heap_region_do(current);
2087 }
2088 }
2089 }
2090 };
2091
2092 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2093 assert(blk->is_thread_safe(), "Only thread-safe closures here");
2094 const uint active_workers = workers()->active_workers();
2095 const size_t n_regions = num_regions();
2096 size_t stride = blk->parallel_region_stride();
2097 if (stride == 0 && active_workers > 1) {
2098 // Automatically derive the stride to balance the work between threads
2099 // evenly. Do not try to split work if below the reasonable threshold.
2100 constexpr size_t threshold = 4096;
2101 stride = n_regions <= threshold ?
2102 threshold :
2103 (n_regions + active_workers - 1) / active_workers;
2104 }
2105
2106 if (n_regions > stride && active_workers > 1) {
2107 ShenandoahParallelHeapRegionTask task(blk, stride);
2108 workers()->run_task(&task);
2109 } else {
2110 heap_region_iterate(blk);
2111 }
2112 }
2113
2114 void ShenandoahHeap::heap_region_iterator(ShenandoahHeapRegionClosure* closure) const {
2115 ShenandoahHeapRegionIteratorTask task(closure);
2116 workers()->run_task(&task);
2117 }
2118
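// The closure body is intentionally empty: the value of the rendezvous is the handshake
// itself, which guarantees that every Java thread has passed through a handshake point
// (and thus observes prior global state changes) before the caller continues.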
2119 class ShenandoahRendezvousHandshakeClosure : public HandshakeClosure {
2120 public:
2121 inline ShenandoahRendezvousHandshakeClosure(const char* name) : HandshakeClosure(name) {}
2122 inline void do_thread(Thread* thread) {}
2123 };
2124
2125 void ShenandoahHeap::rendezvous_threads(const char* name) {
2126 ShenandoahRendezvousHandshakeClosure cl(name);
2127 Handshake::execute(&cl);
2128 }
2129
2130 void ShenandoahHeap::recycle_trash() {
2131 free_set()->recycle_trash();
2132 }
2133
2134 void ShenandoahHeap::do_class_unloading() {
2135 _unloader.unload();
2136 if (mode()->is_generational()) {
2137 old_generation()->set_parsable(false);
2138 }
2139 }
2140
2141 void ShenandoahHeap::stw_weak_refs(ShenandoahGeneration* generation, bool full_gc) {
2142 // Weak refs processing
2143 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2144 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2145 ShenandoahTimingsTracker t(phase);
2146 ShenandoahGCWorkerPhase worker_phase(phase);
2147 generation->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2148 }
2149
2150 void ShenandoahHeap::prepare_update_heap_references() {
2151 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2152
2153 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2154 // make them parsable for update code to work correctly. Plus, we can compute new sizes
2155 // for future GCLABs here.
2156 if (UseTLAB) {
2157 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2158 gclabs_retire(ResizeTLAB);
2159 }
2160
2161 _update_refs_iterator.reset();
2162 }
2163
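// Publish a changed gc state to the thread-local copies of all threads. Must run at a
// safepoint; the SATB filter is refreshed first so that old-only marking skips young
// pointers.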
2164 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2165 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2166 if (_gc_state_changed) {
2167 // If we are only marking old, we do not need to process young pointers
2168 ShenandoahBarrierSet::satb_mark_queue_set().set_filter_out_young(
2169 is_concurrent_old_mark_in_progress() && !is_concurrent_young_mark_in_progress()
2170 );
2171 ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
2172 Threads::threads_do(&propagator);
2173 _gc_state_changed = false;
2174 }
2175 }
2176
2177 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2178 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2179 _gc_state.set_cond(mask, value);
2180 _gc_state_changed = true;
2181 }
2182
2183 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
2184 // Holding the thread lock here assures that any thread created after we change the gc
2185 // state will have the correct state. It also prevents attaching threads from seeing
2186 // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
2187 // threads will use their thread local copy of the gc state (changed by a handshake, or on a
2188 // safepoint).
2189 assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
2190 _gc_state.set_cond(mask, value);
2191 }
2192
2193 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2194 uint mask;
2195 assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2196 if (!in_progress && is_concurrent_old_mark_in_progress()) {
2197 assert(mode()->is_generational(), "Only generational GC has old marking");
2198 assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2199 // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2200 mask = YOUNG_MARKING;
2201 } else {
2202 mask = MARKING | YOUNG_MARKING;
2203 }
2204 set_gc_state_at_safepoint(mask, in_progress);
2205 manage_satb_barrier(in_progress);
2206 }
2207
2208 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2209 #ifdef ASSERT
2210 // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2211 bool has_forwarded = has_forwarded_objects();
2212 bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2213 bool evacuating = _gc_state.is_set(EVACUATION);
2214 assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2215 "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2216 #endif
2217 if (!in_progress && is_concurrent_young_mark_in_progress()) {
2218 // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2219 assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2220 set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2221 } else {
2222 set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2223 }
2224 manage_satb_barrier(in_progress);
2225 }
2226
2227 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2228 return old_generation()->is_preparing_for_mark();
2229 }
2230
2231 void ShenandoahHeap::manage_satb_barrier(bool active) {
2232 if (is_concurrent_mark_in_progress()) {
2233 // Ignore request to deactivate barrier while concurrent mark is in progress.
2234 // Do not attempt to re-activate the barrier if it is already active.
2235 if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2236 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2237 }
2238 } else {
2239 // No concurrent marking is in progress so honor request to deactivate,
2240 // but only if the barrier is already active.
2241 if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2242 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2243 }
2244 }
2245 }
2246
2247 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2248 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2249 set_gc_state_at_safepoint(EVACUATION, in_progress);
2250 }
2251
2252 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2253 if (in_progress) {
2254 _concurrent_strong_root_in_progress.set();
2255 } else {
2256 _concurrent_strong_root_in_progress.unset();
2257 }
2258 }
2259
2260 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2261 set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2262 }
2263
2264 GCTracer* ShenandoahHeap::tracer() {
2265 return shenandoah_policy()->tracer();
2266 }
2267
2268 size_t ShenandoahHeap::tlab_used() const {
2269 return _free_set->used_not_holding_lock();
2270 }
2271
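// Atomically install the cancellation cause. The request takes effect only if no
// cancellation is pending, or if the pending cause is _shenandoah_concurrent_gc,
// which a new request is allowed to replace.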
2272 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
2273 const GCCause::Cause prev = _cancelled_gc.xchg(cause);
2274 return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
2275 }
2276
2277 void ShenandoahHeap::cancel_concurrent_mark() {
2278 if (mode()->is_generational()) {
2279 young_generation()->cancel_marking();
2280 old_generation()->cancel_marking();
2281 }
2282
2283 global_generation()->cancel_marking();
2284
2285 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2286 }
2287
2288 bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2289 if (try_cancel_gc(cause)) {
2290 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2291 log_info(gc,thread)("%s", msg.buffer());
2292 Events::log(Thread::current(), "%s", msg.buffer());
2293 _cancel_requested_time = os::elapsedTime();
2294 return true;
2295 }
2296 return false;
2297 }
2298
2299 uint ShenandoahHeap::max_workers() {
2300 return _max_workers;
2301 }
2302
2303 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when a GC is running.
2305
2306 // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2307 _shenandoah_policy->record_shutdown();
2308
2309 // Step 1. Stop reporting on gc thread cpu utilization
2310 mmu_tracker()->stop();
2311
2312 // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC).
2313 control_thread()->stop();
2314
  // Step 3. Stop the uncommit thread.
2316 if (_uncommit_thread != nullptr) {
2317 _uncommit_thread->stop();
2318 }
2319 }
2320
2321 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2322 if (!unload_classes()) return;
2323 ClassUnloadingContext ctx(_workers->active_workers(),
2324 true /* unregister_nmethods_during_purge */,
2325 false /* lock_nmethod_free_separately */);
2326
2327 // Unload classes and purge SystemDictionary.
2328 {
2329 ShenandoahPhaseTimings::Phase phase = full_gc ?
2330 ShenandoahPhaseTimings::full_gc_purge_class_unload :
2331 ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2332 ShenandoahIsAliveSelector is_alive;
2333 {
2334 CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2335 ShenandoahGCPhase gc_phase(phase);
2336 ShenandoahGCWorkerPhase worker_phase(phase);
2337 bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2338
2339 // Clean JVMCI metadata handles.
2340 JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
2341
2342 ShenandoahClassUnloadingTask unlink_task(phase, unloading_occurred);
2343 _workers->run_task(&unlink_task);
2344 }
    // Release the memory of unloaded nmethods.
2346 ClassUnloadingContext::context()->purge_and_free_nmethods();
2347 }
2348
2349 {
2350 ShenandoahGCPhase phase(full_gc ?
2351 ShenandoahPhaseTimings::full_gc_purge_cldg :
2352 ShenandoahPhaseTimings::degen_gc_purge_cldg);
2353 ClassLoaderDataGraph::purge(true /* at_safepoint */);
2354 }
2355 // Resize and verify metaspace
2356 MetaspaceGC::compute_new_size();
2357
2358 if (mode()->is_generational()) {
2359 old_generation()->set_parsable(false);
2360 }
2361
2362 DEBUG_ONLY(MetaspaceUtils::verify();)
2363 }
2364
2365 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2366 // so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
2369 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2370 uint num_workers = _workers->active_workers();
2371 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2372 ShenandoahPhaseTimings::full_gc_purge_weak_par :
2373 ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2374 ShenandoahGCPhase phase(timing_phase);
2375 ShenandoahGCWorkerPhase worker_phase(timing_phase);
2376 // Cleanup weak roots
2377 if (has_forwarded_objects()) {
2378 ShenandoahForwardedIsAliveClosure is_alive;
2379 ShenandoahNonConcUpdateRefsClosure keep_alive;
2380 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2381 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2382 _workers->run_task(&cleaning_task);
2383 } else {
2384 ShenandoahIsAliveClosure is_alive;
2385 #ifdef ASSERT
2386 ShenandoahAssertNotForwardedClosure verify_cl;
2387 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2388 cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2389 #else
2390 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2391 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2392 #endif
2393 _workers->run_task(&cleaning_task);
2394 }
2395 }
2396
2397 void ShenandoahHeap::parallel_cleaning(ShenandoahGeneration* generation, bool full_gc) {
2398 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2399 assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2400 ShenandoahGCPhase phase(full_gc ?
2401 ShenandoahPhaseTimings::full_gc_purge :
2402 ShenandoahPhaseTimings::degen_gc_purge);
2403 stw_weak_refs(generation, full_gc);
2404 stw_process_weak_roots(full_gc);
2405 stw_unload_classes(full_gc);
2406 }
2407
2408 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2409 set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2410 }
2411
2412 void ShenandoahHeap::set_unload_classes(bool uc) {
2413 _unload_classes.set_cond(uc);
2414 }
2415
2416 bool ShenandoahHeap::unload_classes() const {
2417 return _unload_classes.is_set();
2418 }
2419
2420 address ShenandoahHeap::in_cset_fast_test_addr() {
2421 ShenandoahHeap* heap = ShenandoahHeap::heap();
2422 assert(heap->collection_set() != nullptr, "Sanity");
2423 return (address) heap->collection_set()->biased_map_address();
2424 }
2425
2426 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  // It is important to call force_alloc_rate_sample() before the associated generation's bytes_allocated has been reset.
  // Note that we obtain the heap lock to prevent additional allocations between sampling bytes_allocated_since_gc_start()
  // and reset_bytes_allocated_since_gc_start().
2430 {
2431 ShenandoahHeapLocker locker(lock());
    // unaccounted_bytes is the number of bytes not accounted for by our forced sample. If the sample interval is
    // too short, the "forced sample" will not happen, and any recently allocated bytes are "unaccounted for". We
    // pretend these bytes are allocated after the start of the subsequent GC.
2435 size_t unaccounted_bytes;
2436 size_t bytes_allocated = _free_set->get_bytes_allocated_since_gc_start();
2437 if (mode()->is_generational()) {
2438 unaccounted_bytes = young_generation()->heuristics()->force_alloc_rate_sample(bytes_allocated);
2439 } else {
2440 // Single-gen Shenandoah uses global heuristics.
2441 unaccounted_bytes = heuristics()->force_alloc_rate_sample(bytes_allocated);
2442 }
2443 _free_set->reset_bytes_allocated_since_gc_start(unaccounted_bytes);
2444 }
2445 }
2446
2447 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2448 _degenerated_gc_in_progress.set_cond(in_progress);
2449 }
2450
2451 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2452 _full_gc_in_progress.set_cond(in_progress);
2453 }
2454
2455 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2456 assert (is_full_gc_in_progress(), "should be");
2457 _full_gc_move_in_progress.set_cond(in_progress);
2458 }
2459
2460 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2461 set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2462 }
2463
2464 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2465 ShenandoahCodeRoots::register_nmethod(nm);
2466 }
2467
2468 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2469 ShenandoahCodeRoots::unregister_nmethod(nm);
2470 }
2471
2472 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2473 heap_region_containing(o)->record_pin();
2474 }
2475
2476 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2477 ShenandoahHeapRegion* r = heap_region_containing(o);
2478 assert(r != nullptr, "Sanity");
2479 assert(r->pin_count() > 0, "Region %zu should have non-zero pins", r->index());
2480 r->record_unpin();
2481 }
2482
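// Reconcile region state with the pin counts, under the heap lock: active regions whose
// pin count dropped to zero leave the pinned state, and active regions that acquired
// pins while unpinned become pinned.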
2483 void ShenandoahHeap::sync_pinned_region_status() {
2484 ShenandoahHeapLocker locker(lock());
2485
2486 for (size_t i = 0; i < num_regions(); i++) {
2487 ShenandoahHeapRegion *r = get_region(i);
2488 if (r->is_active()) {
2489 if (r->is_pinned()) {
2490 if (r->pin_count() == 0) {
2491 r->make_unpinned();
2492 }
2493 } else {
2494 if (r->pin_count() > 0) {
2495 r->make_pinned();
2496 }
2497 }
2498 }
2499 }
2500
2501 assert_pinned_region_status();
2502 }
2503
2504 #ifdef ASSERT
2505 void ShenandoahHeap::assert_pinned_region_status() const {
2506 assert_pinned_region_status(global_generation());
2507 }
2508
2509 void ShenandoahHeap::assert_pinned_region_status(ShenandoahGeneration* generation) const {
2510 for (size_t i = 0; i < num_regions(); i++) {
2511 ShenandoahHeapRegion* r = get_region(i);
2512 if (generation->contains(r)) {
2513 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2514 "Region %zu pinning status is inconsistent", i);
2515 }
2516 }
2517 }
2518 #endif
2519
2520 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2521 return _gc_timer;
2522 }
2523
2524 void ShenandoahHeap::prepare_concurrent_roots() {
2525 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2526 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2527 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2528 set_concurrent_weak_root_in_progress(true);
2529 if (unload_classes()) {
2530 _unloader.prepare();
2531 }
2532 }
2533
2534 void ShenandoahHeap::finish_concurrent_roots() {
2535 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2536 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2537 if (unload_classes()) {
2538 _unloader.finish();
2539 }
2540 }
2541
2542 #ifdef ASSERT
2543 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2544 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2545
2546 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2547 // Use ParallelGCThreads inside safepoints
2548 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2549 ParallelGCThreads, nworkers);
2550 } else {
2551 // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2553 ConcGCThreads, nworkers);
2554 }
2555 }
2556 #endif
2557
2558 ShenandoahVerifier* ShenandoahHeap::verifier() {
2559 guarantee(ShenandoahVerify, "Should be enabled");
2560 assert (_verifier != nullptr, "sanity");
2561 return _verifier;
2562 }
2563
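// Worker task for the update-references phase: walks marked objects in active non-cset
// regions up to the per-region update watermark and updates their oop fields to point at
// the evacuated copies. CONCURRENT selects the closure type and whether workers join the
// suspendible thread set so they can yield to safepoints.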
2564 template<bool CONCURRENT>
2565 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2566 private:
2567 ShenandoahHeap* _heap;
2568 ShenandoahRegionIterator* _regions;
2569 public:
2570 explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2571 WorkerTask("Shenandoah Update References"),
2572 _heap(ShenandoahHeap::heap()),
2573 _regions(regions) {
2574 }
2575
2576 void work(uint worker_id) {
2577 if (CONCURRENT) {
2578 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2579 SuspendibleThreadSetJoiner stsj;
2580 do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2581 } else {
2582 ShenandoahParallelWorkerSession worker_session(worker_id);
2583 do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2584 }
2585 }
2586
2587 private:
2588 template<class T>
2589 void do_work(uint worker_id) {
2590 if (CONCURRENT && (worker_id == 0)) {
2591 // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2592 // results of evacuation. These reserves are no longer necessary because evacuation has completed.
2593 size_t cset_regions = _heap->collection_set()->count();
2594
2595 // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2596 // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2597 // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2598 // next GC cycle.
2599 _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2600 }
2601 // If !CONCURRENT, there's no value in expanding Mutator free set
2602 T cl;
2603 ShenandoahHeapRegion* r = _regions->next();
2604 while (r != nullptr) {
2605 HeapWord* update_watermark = r->get_update_watermark();
2606 assert (update_watermark >= r->bottom(), "sanity");
2607 if (r->is_active() && !r->is_cset()) {
2608 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2609 }
2610 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2611 return;
2612 }
2613 r = _regions->next();
2614 }
2615 }
2616 };
2617
2618 void ShenandoahHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
2619 assert(generation->is_global(), "Should only get global generation here");
2620 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2621
2622 if (concurrent) {
2623 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2624 workers()->run_task(&task);
2625 } else {
2626 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2627 workers()->run_task(&task);
2628 }
2629 }
2630
2631 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2632 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2633 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2634
2635 {
2636 ShenandoahGCPhase phase(concurrent ?
2637 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2638 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2639
2640 final_update_refs_update_region_states();
2641
2642 assert_pinned_region_status();
2643 }
2644
2645 {
2646 ShenandoahGCPhase phase(concurrent ?
2647 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2648 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2649 trash_cset_regions();
2650 }
2651 }
2652
2653 void ShenandoahHeap::final_update_refs_update_region_states() {
2654 ShenandoahSynchronizePinnedRegionStates cl;
2655 parallel_heap_region_iterate(&cl);
2656 }
2657
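// Rebuild the free set under the heap lock: collect the counts of just-trashed young and
// old regions, rebalance the young/old budget in generational mode, and finish the
// rebuild with the adjusted generation sizes. Old heuristics triggers are re-evaluated
// at the end.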
2658 void ShenandoahHeap::rebuild_free_set_within_phase() {
2659 ShenandoahHeapLocker locker(lock());
2660 size_t young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count;
2661 _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count);
2662 // If there are no old regions, first_old_region will be greater than last_old_region
2663 assert((first_old_region > last_old_region) ||
2664 ((last_old_region + 1 - first_old_region >= old_region_count) &&
2665 get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2666 "sanity: old_region_count: %zu, first_old_region: %zu, last_old_region: %zu",
2667 old_region_count, first_old_region, last_old_region);
2668
2669 if (mode()->is_generational()) {
2670 #ifdef ASSERT
2671 if (ShenandoahVerify) {
2672 verifier()->verify_before_rebuilding_free_set();
2673 }
2674 #endif
2675
    // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so consider all of
    // this available for transfer to old. Note that the transfer of humongous regions does not impact available.
2678 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2679 size_t allocation_runway =
2680 gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
2681 gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
2682 }
2683 // Rebuild free set based on adjusted generation sizes.
2684 _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, old_region_count);
2685
2686 if (mode()->is_generational()) {
2687 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2688 ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2689 old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2690 }
2691 }
2692
2693 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2694 ShenandoahGCPhase phase(concurrent ?
2695 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2696 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2697 rebuild_free_set_within_phase();
2698 }
2699
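// The marking bitmap is committed in slices, each backing _bitmap_regions_per_slice
// consecutive regions. A slice may only be uncommitted when no region in its group is
// committed; this helper answers that question, optionally ignoring the region itself.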
2700 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2701 size_t slice = r->index() / _bitmap_regions_per_slice;
2702
2703 size_t regions_from = _bitmap_regions_per_slice * slice;
2704 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2705 for (size_t g = regions_from; g < regions_to; g++) {
2706 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2707 if (skip_self && g == r->index()) continue;
2708 if (get_region(g)->is_committed()) {
2709 return true;
2710 }
2711 }
2712 return false;
2713 }
2714
2715 void ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2716 shenandoah_assert_heaplocked();
2717 assert(!is_bitmap_region_special(), "Not for special memory");
2718
2719 if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2723 }
2724
2725 // Commit the bitmap slice:
2726 size_t slice = r->index() / _bitmap_regions_per_slice;
2727 size_t off = _bitmap_bytes_per_slice * slice;
2728 size_t len = _bitmap_bytes_per_slice;
2729 char* start = (char*) _bitmap_region.start() + off;
2730
2731 os::commit_memory_or_exit(start, len, false, "Unable to commit bitmap slice");
2732
2733 if (AlwaysPreTouch) {
2734 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2735 }
2736 }
2737
2738 void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2739 shenandoah_assert_heaplocked();
2740 assert(!is_bitmap_region_special(), "Not for special memory");
2741
2742 if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed, so we exit right away.
2745 return;
2746 }
2747
2748 // Uncommit the bitmap slice:
2749 size_t slice = r->index() / _bitmap_regions_per_slice;
2750 size_t off = _bitmap_bytes_per_slice * slice;
2751 size_t len = _bitmap_bytes_per_slice;
2752
2753 char* addr = (char*) _bitmap_region.start() + off;
2754 os::uncommit_memory(addr, len);
2755 }
2756
2757 void ShenandoahHeap::forbid_uncommit() {
2758 if (_uncommit_thread != nullptr) {
2759 _uncommit_thread->forbid_uncommit();
2760 }
2761 }
2762
2763 void ShenandoahHeap::allow_uncommit() {
2764 if (_uncommit_thread != nullptr) {
2765 _uncommit_thread->allow_uncommit();
2766 }
2767 }
2768
2769 #ifdef ASSERT
2770 bool ShenandoahHeap::is_uncommit_in_progress() {
2771 if (_uncommit_thread != nullptr) {
2772 return _uncommit_thread->is_uncommit_in_progress();
2773 }
2774 return false;
2775 }
2776 #endif
2777
2778 void ShenandoahHeap::safepoint_synchronize_begin() {
2779 StackWatermarkSet::safepoint_synchronize_begin();
2780 SuspendibleThreadSet::synchronize();
2781 }
2782
2783 void ShenandoahHeap::safepoint_synchronize_end() {
2784 SuspendibleThreadSet::desynchronize();
2785 }
2786
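// Diagnostic support for ShenandoahAllocFailureALot: occasionally pretend an allocation
// failed, then sleep briefly to give the GC a chance to react with a cancellation.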
2787 void ShenandoahHeap::try_inject_alloc_failure() {
2788 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2789 _inject_alloc_failure.set();
2790 os::naked_short_sleep(1);
2791 if (cancelled_gc()) {
2792 log_info(gc)("Allocation failure was successfully injected");
2793 }
2794 }
2795 }
2796
2797 bool ShenandoahHeap::should_inject_alloc_failure() {
2798 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2799 }
2800
2801 void ShenandoahHeap::initialize_serviceability() {
2802 _memory_pool = new ShenandoahMemoryPool(this);
2803 _cycle_memory_manager.add_pool(_memory_pool);
2804 _stw_memory_manager.add_pool(_memory_pool);
2805 }
2806
2807 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2808 GrowableArray<GCMemoryManager*> memory_managers(2);
2809 memory_managers.append(&_cycle_memory_manager);
2810 memory_managers.append(&_stw_memory_manager);
2811 return memory_managers;
2812 }
2813
2814 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2815 GrowableArray<MemoryPool*> memory_pools(1);
2816 memory_pools.append(_memory_pool);
2817 return memory_pools;
2818 }
2819
2820 MemoryUsage ShenandoahHeap::memory_usage() {
2821 return shenandoah_memory_usage(_initial_size, used(), committed(), max_capacity());
2822 }
2823
2824 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2825 _heap(ShenandoahHeap::heap()),
2826 _index(0) {}
2827
2828 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2829 _heap(heap),
2830 _index(0) {}
2831
2832 void ShenandoahRegionIterator::reset() {
2833 _index.store_relaxed(0);
2834 }
2835
2836 bool ShenandoahRegionIterator::has_next() const {
2837 return _index.load_relaxed() < _heap->num_regions();
2838 }
2839
2840 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2841 #ifdef ASSERT
2842 assert(_liveness_cache != nullptr, "sanity");
2843 assert(worker_id < _max_workers, "sanity");
2844 for (uint i = 0; i < num_regions(); i++) {
2845 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2846 }
2847 #endif
2848 return _liveness_cache[worker_id];
2849 }
2850
2851 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2852 assert(worker_id < _max_workers, "sanity");
2853 assert(_liveness_cache != nullptr, "sanity");
2854 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2855 for (uint i = 0; i < num_regions(); i++) {
2856 ShenandoahLiveData live = ld[i];
2857 if (live > 0) {
2858 ShenandoahHeapRegion* r = get_region(i);
2859 r->increase_live_data_gc_words(live);
2860 ld[i] = 0;
2861 }
2862 }
2863 }
2864
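// Loom support: decide whether accesses to a stack chunk need GC barriers in the
// current phase.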
2865 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2866 if (is_idle()) return false;
2867
  // Objects allocated after marking start are implicitly alive and don't need any
  // barriers during the marking phase.
2870 if (is_concurrent_mark_in_progress() &&
2871 !marking_context()->allocated_after_mark_start(obj)) {
2872 return true;
2873 }
2874
  // Cannot guarantee that the object is deeply good.
2876 if (has_forwarded_objects()) {
2877 return true;
2878 }
2879
2880 return false;
2881 }
2882
2883 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2884 #if INCLUDE_CDS_JAVA_HEAP
  // CDS wants a raw contiguous memory range to load a bunch of objects itself.
2886 // This is an unusual request, since all requested regions should be regular, not humongous.
2887 //
2888 // CDS would guarantee no objects straddle multiple regions, as long as regions are as large
2889 // as MIN_GC_REGION_ALIGNMENT.
2890 guarantee(ShenandoahHeapRegion::region_size_bytes() >= AOTMappedHeapWriter::MIN_GC_REGION_ALIGNMENT, "Must be");
2891
2892 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_cds(size);
2893 return allocate_memory(req);
2894 #else
2895 assert(false, "Archive heap loader should not be available, should not be here");
2896 return nullptr;
2897 #endif // INCLUDE_CDS_JAVA_HEAP
2898 }
2899
2900 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2901 // Nothing to do here, except checking that heap looks fine.
2902 #ifdef ASSERT
2903 HeapWord* start = archive_space.start();
2904 HeapWord* end = archive_space.end();
2905
2906 // No unclaimed space between the objects.
2907 // Objects are properly allocated in correct regions.
2908 HeapWord* cur = start;
2909 while (cur < end) {
    oop obj = cast_to_oop(cur);
    shenandoah_assert_in_correct_region(nullptr, obj);
    cur += obj->size();
2913 }
2914
2915 // No unclaimed tail at the end of archive space.
2916 assert(cur == end,
2917 "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2918 p2i(cur), p2i(end));
2919
2920 // All regions in contiguous space have good state.
2921 size_t begin_reg_idx = heap_region_index_containing(start);
2922 size_t end_reg_idx = heap_region_index_containing(end);
2923
2924 for (size_t idx = begin_reg_idx; idx <= end_reg_idx; idx++) {
2925 ShenandoahHeapRegion* r = get_region(idx);
2926 assert(r->is_regular(), "Must be regular");
2927 assert(r->is_young(), "Must be young");
2928 assert(idx == end_reg_idx || r->top() == r->end(),
2929 "All regions except the last one should be full: " PTR_FORMAT " " PTR_FORMAT,
2930 p2i(r->top()), p2i(r->end()));
2931 assert(idx != begin_reg_idx || r->bottom() == start,
2932 "Archive space start should be at the bottom of first region: " PTR_FORMAT " " PTR_FORMAT,
2933 p2i(r->bottom()), p2i(start));
2934 assert(idx != end_reg_idx || r->top() == end,
2935 "Archive space end should be at the top of last region: " PTR_FORMAT " " PTR_FORMAT,
2936 p2i(r->top()), p2i(end));
2937 }
2938
2939 #endif
2940 }
2941
2942 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2943 if (!mode()->is_generational()) {
2944 return global_generation();
2945 } else if (affiliation == YOUNG_GENERATION) {
2946 return young_generation();
2947 } else if (affiliation == OLD_GENERATION) {
2948 return old_generation();
2949 }
2950
2951 ShouldNotReachHere();
2952 return nullptr;
2953 }
2954
2955 void ShenandoahHeap::log_heap_status(const char* msg) const {
2956 if (mode()->is_generational()) {
2957 young_generation()->log_status(msg);
2958 old_generation()->log_status(msg);
2959 } else {
2960 global_generation()->log_status(msg);
2961 }
2962 }
2963
2964 ShenandoahHeapLocker::ShenandoahHeapLocker(ShenandoahHeapLock* lock, bool allow_block_for_safepoint) : _lock(lock) {
2965 #ifdef ASSERT
2966 ShenandoahFreeSet* free_set = ShenandoahHeap::heap()->free_set();
2967 // free_set is nullptr only at pre-initialized state
  assert(free_set == nullptr || !free_set->rebuild_lock()->owned_by_self(), "Deadlock: cannot acquire the heap lock while holding the free-set rebuild lock");
2969 assert(_lock != nullptr, "Must not");
2970 #endif
2971 _lock->lock(allow_block_for_safepoint);
2972 }