/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size = MinHeapSize;
  size_t max_byte_size = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now that we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

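  // Worked example, with assumed illustrative values: if regions are 2 MB and
  // heap_map_factor() is 64 (one mark bit per 8-byte heap word), each region needs
  // 32 KB of bitmap. With 4 KB pages, a slice is one region's 32 KB of bitmap;
  // with 2 MB large pages, a single slice covers 64 regions.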
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

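  // Only the bitmap slices covering the initially committed regions are committed up
  // front; the rest of the bitmap is committed in lockstep with region commits (hence
  // the is_bitmap_slice_committed() checks elsewhere in this file).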
  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
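  // (The cset membership test is a byte load at biased_map_address() + (addr >> region
  // size shift), see in_cset_fast_test_addr() below, so a low map address keeps the
  // bias within compact immediate encodings on common platforms.)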
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it from the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pretouch with small pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching the contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

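  // Each worker gets its own liveness cache, so per-region live data can be accumulated
  // locally during marking without contended atomic updates on shared counters, and
  // flushed back to the regions when marking finishes.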
  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = nullptr;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects()) st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress()) st->print("marking, ");
  if (is_evacuation_in_progress()) st->print("evacuating, ");
  if (is_update_refs_in_progress()) st->print("updating refs, ");
  if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
  if (is_full_gc_in_progress()) st->print("full gc, ");
  if (is_full_gc_move_in_progress()) st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != nullptr) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (null)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
  // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease used below zero");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  if (is_in_reserved(p)) {
    if (is_full_gc_move_in_progress()) {
      // Full GC move is running, we do not have consistent region
      // information yet. But we know the pointer is in heap.
      return true;
    }
    // Now check if we point to a live section in active region.
    ShenandoahHeapRegion* r = heap_region_containing(p);
    return (r->is_active() && p < r->top());
  } else {
    return false;
  }
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that applications
  // keep enjoying the committed regions near the bottom. GC allocations are much less
  // frequent, and therefore can accept the committing costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // the heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert (size <= actual_size, "allocation should fit");

  // ...and clear or zap the just-allocated GCLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Allocation failed; block until the control thread has reacted, then retry the allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress (or until at least
    // one full GC has completed).
    size_t original_count = shenandoah_policy()->full_gc_count();
    while (result == nullptr
        && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with a mutator allocation, then we may need to block for a safepoint.
  // We cannot block for a safepoint on GC allocations, because there is a high chance
  // we are already running at a safepoint or from the stack watermark machinery, and we
  // cannot block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Out of memory
  return nullptr;
}

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  ShenandoahHeapRegion* region = start;
  size_t index = region->index();
  do {
    assert(region->is_humongous(), "Expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");
    region->make_trash_immediate();
    region = get_region(++index);
  } while (region != nullptr && region->is_humongous_continuation());

  // Return number of regions trashed
  return index - start->index();
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != nullptr) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  // Return the max allowed size, and let the allocation path
  // figure out the safe size for current allocation.
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
  }
  return nullptr;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  tcl->do_thread(_control_thread);
  workers()->threads_do(tcl);
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread* thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  const uint active_workers = workers()->active_workers();
  const size_t n_regions = num_regions();
  size_t stride = ShenandoahParallelRegionStride;
  if (stride == 0 && active_workers > 1) {
    // Automatically derive the stride to balance the work between threads
    // evenly. Do not try to split work if below a reasonable threshold.
    constexpr size_t threshold = 4096;
    stride = n_regions <= threshold ?
            threshold :
            (n_regions + active_workers - 1) / active_workers;
  }
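  // For example: with 8192 regions and 8 active workers, the stride becomes 1024 and
  // the parallel path below is taken; with 2048 regions, the stride stays at the 4096
  // threshold, so the work is not split and iteration stays serial.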

  if (n_regions > stride && active_workers > 1) {
    ShenandoahParallelHeapRegionTask task(blk, stride);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

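// The closure body is intentionally empty: executing any handshake forces every Java
// thread through a state transition, which is enough to ensure all threads have
// observed the state changes made before the rendezvous.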
class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads(const char* name) {
  ShenandoahRendezvousClosure cl(name);
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We will recheck these under the pause
      // anyway, to capture any updates that happened in the meantime.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
      HeapWord* tams = _ctx->top_at_mark_start(r);
      HeapWord* top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set, make sure it knows about
      // current pinning status. Also, this allows trashing more regions that
      // now have their pinning status dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember the limit for updating refs. It is guaranteed that no from-space
      // refs are written from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(lock());
    _collection_set->clear();
    heuristics()->choose_collection_set(_collection_set);
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
  // make them parsable for update code to work correctly. Plus, we can compute new sizes
  // for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

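// Barrier fast paths read the gc-state from ShenandoahThreadLocalData rather than from
// the global _gc_state, so a changed state must be re-published to every JavaThread.
// This is done at a safepoint, and only when the state has actually changed.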
void ShenandoahHeap::propagate_gc_state_to_java_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    _gc_state_changed = false;
    char state = gc_state();
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
      ShenandoahThreadLocalData::set_gc_state(t, state);
    }
  }
}

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

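// CAS from CANCELLABLE to CANCELLED: only the first caller wins and returns true, so
// the cancellation in cancel_gc() below is logged exactly once.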
bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until GC worker exits normally.
  control_thread()->stop();
}

void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;
  ClassUnloadingContext ctx(_workers->active_workers(),
                            true /* unregister_nmethods_during_purge */,
                            false /* lock_codeblob_free_separately */);

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahPhaseTimings::Phase phase = full_gc ?
                                          ShenandoahPhaseTimings::full_gc_purge_class_unload :
                                          ShenandoahPhaseTimings::degen_gc_purge_class_unload;
    ShenandoahIsAliveSelector is_alive;
    {
      CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
      ShenandoahGCPhase gc_phase(phase);
      ShenandoahGCWorkerPhase worker_phase(phase);
      bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      uint num_workers = _workers->active_workers();
      ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
      _workers->run_task(&unlink_task);
    }
    // Release unloaded nmethods' memory.
    ClassUnloadingContext::context()->purge_and_free_nmethods();
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::degen_gc_purge_cldg);
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  DEBUG_ONLY(MetaspaceUtils::verify();)
}

// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
  uint num_workers = _workers->active_workers();
  ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
                                               ShenandoahPhaseTimings::full_gc_purge_weak_par :
                                               ShenandoahPhaseTimings::degen_gc_purge_weak_par;
  ShenandoahGCPhase phase(timing_phase);
  ShenandoahGCWorkerPhase worker_phase(timing_phase);
  // Cleanup weak roots
  if (has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahUpdateRefsClosure keep_alive;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
      cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
    _workers->run_task(&cleaning_task);
  } else {
    ShenandoahIsAliveClosure is_alive;
#ifdef ASSERT
    ShenandoahAssertNotForwardedClosure verify_cl;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
      cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
#else
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
      cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
#endif
    _workers->run_task(&cleaning_task);
  }
}

void ShenandoahHeap::parallel_cleaning(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
  ShenandoahGCPhase phase(full_gc ?
                          ShenandoahPhaseTimings::full_gc_purge :
                          ShenandoahPhaseTimings::degen_gc_purge);
  stw_weak_refs(full_gc);
  stw_process_weak_roots(full_gc);
  stw_unload_classes(full_gc);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

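// Returns the address of the biased cset membership map: generated code tests cset
// membership with a byte load at biased_map_address() + (obj >> region size shift),
// without first subtracting the heap base. See the cset reservation in initialize().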
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapRegion* r = heap_region_containing(o);
  assert(r != nullptr, "Sanity");
  assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
  r->record_unpin();
1941 }
1942
1943 void ShenandoahHeap::sync_pinned_region_status() {
1944   ShenandoahHeapLocker locker(lock());
1945
1946   for (size_t i = 0; i < num_regions(); i++) {
1947     ShenandoahHeapRegion* r = get_region(i);
1948 if (r->is_active()) {
1949 if (r->is_pinned()) {
1950 if (r->pin_count() == 0) {
1951 r->make_unpinned();
1952 }
1953 } else {
1954 if (r->pin_count() > 0) {
1955 r->make_pinned();
1956 }
1957 }
1958 }
1959 }
1960
1961 assert_pinned_region_status();
1962 }
1963
1964 #ifdef ASSERT
1965 void ShenandoahHeap::assert_pinned_region_status() {
1966 for (size_t i = 0; i < num_regions(); i++) {
1967 ShenandoahHeapRegion* r = get_region(i);
1968 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1969 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1970 }
1971 }
1972 #endif
1973
1974 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1975 return _gc_timer;
1976 }
1977
1978 void ShenandoahHeap::prepare_concurrent_roots() {
1979 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1980 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1981 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1982 set_concurrent_weak_root_in_progress(true);
1983 if (unload_classes()) {
1984 _unloader.prepare();
1985 }
1986 }
1987
1988 void ShenandoahHeap::finish_concurrent_roots() {
1989 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1990 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1991 if (unload_classes()) {
1992 _unloader.finish();
1993 }
1994 }
1995
1996 #ifdef ASSERT
1997 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1998 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1999
2000 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2001 if (UseDynamicNumberOfGCThreads) {
2002 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2003 } else {
2004 // Use ParallelGCThreads inside safepoints
2005 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2006 }
2007 } else {
2008 if (UseDynamicNumberOfGCThreads) {
2009 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2010 } else {
2011 // Use ConcGCThreads outside safepoints
2012 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2013 }
2014 }
2015 }
2016 #endif
2017
2018 ShenandoahVerifier* ShenandoahHeap::verifier() {
2019 guarantee(ShenandoahVerify, "Should be enabled");
2020 assert (_verifier != nullptr, "sanity");
2021 return _verifier;
2022 }
2023
2024 template<bool CONCURRENT>
2025 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2026 private:
2027 ShenandoahHeap* _heap;
2028 ShenandoahRegionIterator* _regions;
2029 public:
2030 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2031 WorkerTask("Shenandoah Update References"),
2032 _heap(ShenandoahHeap::heap()),
2033 _regions(regions) {
2034 }
2035
2036 void work(uint worker_id) {
2037 if (CONCURRENT) {
2038 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2039 ShenandoahSuspendibleThreadSetJoiner stsj;
2040 do_work<ShenandoahConcUpdateRefsClosure>();
2041 } else {
2042 ShenandoahParallelWorkerSession worker_session(worker_id);
2043 do_work<ShenandoahSTWUpdateRefsClosure>();
2044 }
2045 }
2046
2047 private:
2048 template<class T>
2049 void do_work() {
2050 T cl;
2051 ShenandoahHeapRegion* r = _regions->next();
2052 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2053 while (r != nullptr) {
2054 HeapWord* update_watermark = r->get_update_watermark();
2055 assert (update_watermark >= r->bottom(), "sanity");
2056 if (r->is_active() && !r->is_cset()) {
2057 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2058 }
2059 if (ShenandoahPacing) {
2060 _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2061 }
2062 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2063 return;
2064 }
2065 r = _regions->next();
2066 }
2067 }
2068 };
2069
2070 void ShenandoahHeap::update_heap_references(bool concurrent) {
2071 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2072
2073 if (concurrent) {
2074 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2075 workers()->run_task(&task);
2076 } else {
2077 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2078 workers()->run_task(&task);
2079 }
2080 }
2081
2082
2083 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2084 private:
2085 ShenandoahHeapLock* const _lock;
2086
2087 public:
2088 ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2089
2090 void heap_region_do(ShenandoahHeapRegion* r) {
2091     // Drop the unnecessary "pinned" state from regions that no longer have
2092     // CP marks, as this allows trashing them.
2093
2094 if (r->is_active()) {
2095 if (r->is_pinned()) {
2096 if (r->pin_count() == 0) {
2097 ShenandoahHeapLocker locker(_lock);
2098 r->make_unpinned();
2099 }
2100 } else {
2101 if (r->pin_count() > 0) {
2102 ShenandoahHeapLocker locker(_lock);
2103 r->make_pinned();
2104 }
2105 }
2106 }
2107 }
2108
2109 bool is_thread_safe() { return true; }
2110 };
2111
2112 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2113 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2114 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2115
2116 {
2117 ShenandoahGCPhase phase(concurrent ?
2118 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2119 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2120 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2121 parallel_heap_region_iterate(&cl);
2122
2123 assert_pinned_region_status();
2124 }
2125
2126 {
2127 ShenandoahGCPhase phase(concurrent ?
2128 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2129 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2130 trash_cset_regions();
2131 }
2132 }
2133
2134 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2135 {
2136 ShenandoahGCPhase phase(concurrent ?
2137 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2138 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2139 ShenandoahHeapLocker locker(lock());
2140 _free_set->rebuild();
2141 }
2142 }
2143
2144 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2145 print_on(st);
2146 st->cr();
2147 print_heap_regions_on(st);
2148 }
2149
2150 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2151 size_t slice = r->index() / _bitmap_regions_per_slice;
2152
2153 size_t regions_from = _bitmap_regions_per_slice * slice;
2154 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2155 for (size_t g = regions_from; g < regions_to; g++) {
2156 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2157 if (skip_self && g == r->index()) continue;
2158 if (get_region(g)->is_committed()) {
2159 return true;
2160 }
2186 return false;
2187 }
2188
2189 if (AlwaysPreTouch) {
2190 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2191 }
2192
2193 return true;
2194 }
2195
2196 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2197 shenandoah_assert_heaplocked();
2198
2199 // Bitmaps in special regions do not need uncommits
2200 if (_bitmap_region_special) {
2201 return true;
2202 }
2203
2204 if (is_bitmap_slice_committed(r, true)) {
2205     // Some other region from the group is still committed, meaning the bitmap
2206     // slice should stay committed; exit right away.
2207 return true;
2208 }
2209
2210 // Uncommit the bitmap slice:
2211 size_t slice = r->index() / _bitmap_regions_per_slice;
2212 size_t off = _bitmap_bytes_per_slice * slice;
2213 size_t len = _bitmap_bytes_per_slice;
2214 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2215 return false;
2216 }
2217 return true;
2218 }
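// Worked example for the slice math above (illustrative numbers, not from this
// build): with 2M bitmap pages and 512K of bitmap per region,
// _bitmap_regions_per_slice == 4 and _bitmap_bytes_per_slice == 2M.
// Uncommitting the bitmap for region 9 then computes:
//
//   slice = 9 / 4 = 2
//   off   = 2M * 2 = 4M   // byte offset of the slice within _bitmap_region
//   len   = 2M            // one slice, page-granular by construction
//
// and only proceeds if no other region among 8..11 is still committed.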
2219
2220 void ShenandoahHeap::safepoint_synchronize_begin() {
2221 SuspendibleThreadSet::synchronize();
2222 }
2223
2224 void ShenandoahHeap::safepoint_synchronize_end() {
2225 SuspendibleThreadSet::desynchronize();
2226 }
2227
2228 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2229 static const char *msg = "Concurrent uncommit";
2230 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2231 EventMark em("%s", msg);
2232
2233 op_uncommit(shrink_before, shrink_until);
2234 }
2235
2236 void ShenandoahHeap::try_inject_alloc_failure() {
2237 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2238 _inject_alloc_failure.set();
2239 os::naked_short_sleep(1);
2240 if (cancelled_gc()) {
2241 log_info(gc)("Allocation failure was successfully injected");
2242 }
2243 }
2244 }
2245
2246 bool ShenandoahHeap::should_inject_alloc_failure() {
2247 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2248 }
2249
2250 void ShenandoahHeap::initialize_serviceability() {
2251 _memory_pool = new ShenandoahMemoryPool(this);
2252 _cycle_memory_manager.add_pool(_memory_pool);
2253 _stw_memory_manager.add_pool(_memory_pool);
2254 }
2255
2256 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2257 GrowableArray<GCMemoryManager*> memory_managers(2);
2258 memory_managers.append(&_cycle_memory_manager);
2259 memory_managers.append(&_stw_memory_manager);
2260 return memory_managers;
2261 }
2262
2263 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2264 GrowableArray<MemoryPool*> memory_pools(1);
2265 memory_pools.append(_memory_pool);
2266 return memory_pools;
2267 }
2268
2269 MemoryUsage ShenandoahHeap::memory_usage() {
2270 return _memory_pool->get_memory_usage();
2271 }
2272
2273 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2274 _heap(ShenandoahHeap::heap()),
2275 _index(0) {}
2276
2277 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2278 _heap(heap),
2279 _index(0) {}
2280
2281 void ShenandoahRegionIterator::reset() {
2282 _index = 0;
2283 }
2284
2285 bool ShenandoahRegionIterator::has_next() const {
2286 return _index < _heap->num_regions();
2287 }
2288
2289 char ShenandoahHeap::gc_state() const {
2290 return _gc_state.raw_value();
2291 }
2292
2293 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2294 #ifdef ASSERT
2295 assert(_liveness_cache != nullptr, "sanity");
2296 assert(worker_id < _max_workers, "sanity");
2297 for (uint i = 0; i < num_regions(); i++) {
2298 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2299 }
2300 #endif
2301 return _liveness_cache[worker_id];
2302 }
2303
2304 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2305 assert(worker_id < _max_workers, "sanity");
2306 assert(_liveness_cache != nullptr, "sanity");
2307 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2308 for (uint i = 0; i < num_regions(); i++) {
2309 ShenandoahLiveData live = ld[i];
2310 if (live > 0) {
2311 ShenandoahHeapRegion* r = get_region(i);
2312       r->increase_live_data_gc_words(live);
2313       ld[i] = 0;
2314     }
2315 }
2316 }
2317
2318 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2319 if (is_idle()) return false;
2320
2321   // Objects allocated after marking start are implicitly alive and do not need
2322   // any barriers during the marking phase.
2323 if (is_concurrent_mark_in_progress() &&
2324 !marking_context()->allocated_after_mark_start(obj)) {
2325 return true;
2326 }
2327
2328   // Cannot guarantee obj is "deeply good": it may still reference forwarded oops.
2329 if (has_forwarded_objects()) {
2330 return true;
2331 }
2332
2333 return false;
2334 }
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "memory/allocation.hpp"
29 #include "memory/universe.hpp"
30
31 #include "gc/shared/classUnloadingContext.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/gc_globals.hpp"
36 #include "gc/shared/locationPrinter.inline.hpp"
37 #include "gc/shared/memAllocator.hpp"
38 #include "gc/shared/plab.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40
41 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
42 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
43 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
44 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
45 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
46 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
47 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
48 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
50 #include "gc/shenandoah/shenandoahControlThread.hpp"
51 #include "gc/shenandoah/shenandoahFreeSet.hpp"
52 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
53 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
54 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
55 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
56 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
57 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
58 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
59 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
60 #include "gc/shenandoah/shenandoahInitLogger.hpp"
61 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
62 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
63 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
64 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
65 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
66 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
67 #include "gc/shenandoah/shenandoahPadding.hpp"
68 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
69 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
70 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
71 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
72 #include "gc/shenandoah/shenandoahSTWMark.hpp"
73 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
74 #include "gc/shenandoah/shenandoahUtils.hpp"
75 #include "gc/shenandoah/shenandoahVerifier.hpp"
76 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
77 #include "gc/shenandoah/shenandoahVMOperations.hpp"
78 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
79 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
80 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
81 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
82 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
83 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
84 #include "utilities/globalDefinitions.hpp"
85
86 #if INCLUDE_JFR
87 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
88 #endif
89
90 #include "classfile/systemDictionary.hpp"
91 #include "code/codeCache.hpp"
92 #include "memory/classLoaderMetaspace.hpp"
93 #include "memory/metaspaceUtils.hpp"
94 #include "oops/compressedOops.inline.hpp"
95 #include "prims/jvmtiTagMap.hpp"
96 #include "runtime/atomic.hpp"
97 #include "runtime/globals.hpp"
98 #include "runtime/interfaceSupport.inline.hpp"
99 #include "runtime/java.hpp"
100 #include "runtime/orderAccess.hpp"
101 #include "runtime/safepointMechanism.hpp"
102 #include "runtime/threads.hpp"
103 #include "runtime/vmThread.hpp"
104 #include "services/mallocTracker.hpp"
105 #include "services/memTracker.hpp"
106 #include "utilities/events.hpp"
107 #include "utilities/powerOfTwo.hpp"
108
109 class ShenandoahPretouchHeapTask : public WorkerTask {
110 private:
111 ShenandoahRegionIterator _regions;
112 const size_t _page_size;
113 public:
114 ShenandoahPretouchHeapTask(size_t page_size) :
115 WorkerTask("Shenandoah Pretouch Heap"),
116 _page_size(page_size) {}
117
118 virtual void work(uint worker_id) {
119 ShenandoahHeapRegion* r = _regions.next();
120 while (r != nullptr) {
121 if (r->is_committed()) {
122         os::pretouch_memory(r->bottom(), r->end(), _page_size);
123       }
124       r = _regions.next();
125     }
126   }
127 };
128
129 class ShenandoahPretouchBitmapTask : public WorkerTask {
130 private:
131   ShenandoahRegionIterator _regions;
132   char* _bitmap_base;
133   const size_t _bitmap_size;
134   const size_t _page_size;
135 public:
136   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
137     WorkerTask("Shenandoah Pretouch Bitmap"),
138     _bitmap_base(bitmap_base),
139     _bitmap_size(bitmap_size),
140     _page_size(page_size) {}
141
142   virtual void work(uint worker_id) {
143     ShenandoahHeapRegion* r = _regions.next();
144     while (r != nullptr) {
145       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / ShenandoahMarkBitMap::heap_map_factor();
146       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / ShenandoahMarkBitMap::heap_map_factor();
147       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
148
149       if (r->is_committed()) {
150         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
151       }
152
153       r = _regions.next();
154     }
155   }
156 };
157
158 jint ShenandoahHeap::initialize() {
159 //
160 // Figure out heap sizing
161 //
162
163 size_t init_byte_size = InitialHeapSize;
164 size_t min_byte_size = MinHeapSize;
165 size_t max_byte_size = MaxHeapSize;
166 size_t heap_alignment = HeapAlignment;
167
168 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
169
170 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
171 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
172
173 _num_regions = ShenandoahHeapRegion::region_count();
174 assert(_num_regions == (max_byte_size / reg_size_bytes),
175 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
176 _num_regions, max_byte_size, reg_size_bytes);
177
178 size_t num_committed_regions = init_byte_size / reg_size_bytes;
179 num_committed_regions = MIN2(num_committed_regions, _num_regions);
180 assert(num_committed_regions <= _num_regions, "sanity");
181 _initial_size = num_committed_regions * reg_size_bytes;
182
183 size_t num_min_regions = min_byte_size / reg_size_bytes;
184 num_min_regions = MIN2(num_min_regions, _num_regions);
185 assert(num_min_regions <= _num_regions, "sanity");
186 _minimum_size = num_min_regions * reg_size_bytes;
187
188 _soft_max_size = SoftMaxHeapSize;
189
190 _committed = _initial_size;
191
192 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
193 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
194 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
195
196 //
197 // Reserve and commit memory for heap
198 //
199
200 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
201 initialize_reserved_region(heap_rs);
202 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
203 _heap_region_special = heap_rs.special();
204
205 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
206 "Misaligned heap: " PTR_FORMAT, p2i(base()));
207 os::trace_page_sizes_for_requested_size("Heap",
208 max_byte_size, heap_rs.page_size(), heap_alignment,
209 heap_rs.base(), heap_rs.size());
210
211 #if SHENANDOAH_OPTIMIZED_MARKTASK
212 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
213 // Fail if we ever attempt to address more than we can.
214 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
215 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
216 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
217 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
218 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
219 vm_exit_during_initialization("Fatal Error", buf);
220 }
221 #endif
222
223 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
224 if (!_heap_region_special) {
225 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
226 "Cannot commit heap memory");
227 }
228
229 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
230
231 // Now we know the number of regions and heap sizes, initialize the heuristics.
232 initialize_heuristics();
233
234 assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
235
236 //
237 // Worker threads must be initialized after the barrier is configured
238 //
239 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
240 if (_workers == nullptr) {
241 vm_exit_during_initialization("Failed necessary allocation.");
242 } else {
243 _workers->initialize_workers();
244 }
245
246 if (ParallelGCThreads > 1) {
247 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
248 _safepoint_workers->initialize_workers();
249 }
250
251 //
252 // Reserve and commit memory for bitmap(s)
253 //
254
255 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
256 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
257
258 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
259
260 guarantee(bitmap_bytes_per_region != 0,
261 "Bitmap bytes per region should not be zero");
262 guarantee(is_power_of_2(bitmap_bytes_per_region),
263 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
264
265 if (bitmap_page_size > bitmap_bytes_per_region) {
266 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
267 _bitmap_bytes_per_slice = bitmap_page_size;
268 } else {
269 _bitmap_regions_per_slice = 1;
270 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
271 }
272
273 guarantee(_bitmap_regions_per_slice >= 1,
274 "Should have at least one region per slice: " SIZE_FORMAT,
275 _bitmap_regions_per_slice);
276
277 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
278 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
279 _bitmap_bytes_per_slice, bitmap_page_size);
280
281 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
282 os::trace_page_sizes_for_requested_size("Mark Bitmap",
283 bitmap_size_orig, bitmap.page_size(), bitmap_page_size,
284 bitmap.base(),
285 bitmap.size());
286 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
287 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
288 _bitmap_region_special = bitmap.special();
289
290 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
291 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
292 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
293 if (!_bitmap_region_special) {
294 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
295 "Cannot commit bitmap memory");
296 }
297
298 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
299
300 if (ShenandoahVerify) {
301 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
302 os::trace_page_sizes_for_requested_size("Verify Bitmap",
303 bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size,
304 verify_bitmap.base(),
305 verify_bitmap.size());
306 if (!verify_bitmap.special()) {
307 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
308 "Cannot commit verification bitmap memory");
309 }
310 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
311 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
312 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
313 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
314 }
315
316 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
317 size_t aux_bitmap_page_size = bitmap_page_size;
318
319 ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
320 os::trace_page_sizes_for_requested_size("Aux Bitmap",
321 bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size,
322 aux_bitmap.base(), aux_bitmap.size());
323 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
324 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
325 _aux_bitmap_region_special = aux_bitmap.special();
326 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
327
328 //
329 // Create regions and region sets
330 //
331 size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
332 size_t region_storage_size_orig = region_align * _num_regions;
333 size_t region_storage_size = align_up(region_storage_size_orig,
334 MAX2(region_page_size, os::vm_allocation_granularity()));
335
336 ReservedSpace region_storage(region_storage_size, region_page_size);
337 os::trace_page_sizes_for_requested_size("Region Storage",
338 region_storage_size_orig, region_storage.page_size(), region_page_size,
339 region_storage.base(), region_storage.size());
340 MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
341 if (!region_storage.special()) {
342 os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
343 "Cannot commit region memory");
344 }
345
346 // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
347 // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
348 // If not successful, bite the bullet and allocate at whatever address is available.
349 {
350 const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
351 const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
352 const size_t cset_page_size = os::vm_page_size();
353
354 uintptr_t min = round_up_power_of_2(cset_align);
355 uintptr_t max = (1u << 30u);
356 ReservedSpace cset_rs;
357
358 for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
359 char* req_addr = (char*)addr;
360 assert(is_aligned(req_addr, cset_align), "Should be aligned");
361 cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
362 if (cset_rs.is_reserved()) {
363 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
364 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
365 break;
366 }
367 }
368
369 if (_collection_set == nullptr) {
370 cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
371 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
372 }
373 os::trace_page_sizes_for_requested_size("Collection Set",
374 cset_size, cset_rs.page_size(), cset_page_size,
375 cset_rs.base(),
376 cset_rs.size());
377 }
378
379 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
380 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
381 _free_set = new ShenandoahFreeSet(this, _num_regions);
382
383 {
384 ShenandoahHeapLocker locker(lock());
385
386 for (size_t i = 0; i < _num_regions; i++) {
387 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
388 bool is_committed = i < num_committed_regions;
389 void* loc = region_storage.base() + i * region_align;
390
391 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
392 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
393
394 _marking_context->initialize_top_at_mark_start(r);
395 _regions[i] = r;
396 assert(!collection_set()->is_in(i), "New region should not be in collection set");
397
398 _affiliations[i] = ShenandoahAffiliation::FREE;
399 }
400
401 size_t young_cset_regions, old_cset_regions;
402
403     // We are initializing the free set; cset region tallies are ignored here.
404 size_t first_old, last_old, num_old;
405 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
406 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
407 }
408
409 if (AlwaysPreTouch) {
410 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
411 // before initialize() below zeroes it with initializing thread. For any given region,
412 // we touch the region and the corresponding bitmaps from the same thread.
413 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
414
415 _pretouch_heap_page_size = heap_page_size;
416 _pretouch_bitmap_page_size = bitmap_page_size;
417
418     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
419     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
420
421 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
422 _workers->run_task(&bcl);
423
424 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
425 _workers->run_task(&hcl);
426 }
427
428 //
429 // Initialize the rest of GC subsystems
430 //
431
432 _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
433 for (uint worker = 0; worker < _max_workers; worker++) {
434 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
435 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
436 }
437
438 // There should probably be Shenandoah-specific options for these,
439 // just as there are G1-specific options.
440 {
441 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
442 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
443 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
444 }
445
446 _monitoring_support = new ShenandoahMonitoringSupport(this);
447 _phase_timings = new ShenandoahPhaseTimings(max_workers());
448 ShenandoahCodeRoots::initialize();
449
450 if (ShenandoahPacing) {
451 _pacer = new ShenandoahPacer(this);
452 _pacer->setup_for_idle();
453 }
454
455 initialize_controller();
456
457 if (ShenandoahUncommit) {
458 _uncommit_thread = new ShenandoahUncommitThread(this);
459 }
460
461 print_init_logger();
462
463 return JNI_OK;
464 }
465
466 void ShenandoahHeap::initialize_controller() {
467 _control_thread = new ShenandoahControlThread();
468 }
469
470 void ShenandoahHeap::print_init_logger() const {
471 ShenandoahInitLogger::print();
472 }
473
474 void ShenandoahHeap::initialize_mode() {
475 if (ShenandoahGCMode != nullptr) {
476 if (strcmp(ShenandoahGCMode, "satb") == 0) {
477 _gc_mode = new ShenandoahSATBMode();
478 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
479 _gc_mode = new ShenandoahPassiveMode();
480 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
481 _gc_mode = new ShenandoahGenerationalMode();
482 } else {
483 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
484 }
485 } else {
486 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
487 }
488 _gc_mode->initialize_flags();
489 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
490 vm_exit_during_initialization(
491 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
492 _gc_mode->name()));
493 }
494 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
495 vm_exit_during_initialization(
496 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
497 _gc_mode->name()));
498 }
499 }
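// For illustration (assuming a build where the generational mode is marked
// experimental, as it is upstream): selecting a non-product mode requires the
// corresponding unlock flag, e.g.
//
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \
//        -XX:ShenandoahGCMode=generational ...
//
// whereas "passive" is diagnostic and needs -XX:+UnlockDiagnosticVMOptions.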
500
501 void ShenandoahHeap::initialize_heuristics() {
502 _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity());
503 _global_generation->initialize_heuristics(mode());
504 }
505
506 #ifdef _MSC_VER
507 #pragma warning( push )
508 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
509 #endif
510
511 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
512 CollectedHeap(),
513 _gc_generation(nullptr),
514 _active_generation(nullptr),
515 _initial_size(0),
516 _committed(0),
517 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
518 _workers(nullptr),
519 _safepoint_workers(nullptr),
520 _heap_region_special(false),
521 _num_regions(0),
522 _regions(nullptr),
523 _affiliations(nullptr),
524 _gc_state_changed(false),
525 _gc_no_progress_count(0),
526 _cancel_requested_time(0),
527 _update_refs_iterator(this),
528 _global_generation(nullptr),
529 _control_thread(nullptr),
530 _uncommit_thread(nullptr),
531 _young_generation(nullptr),
532 _old_generation(nullptr),
533 _shenandoah_policy(policy),
534 _gc_mode(nullptr),
535 _free_set(nullptr),
536 _pacer(nullptr),
537 _verifier(nullptr),
538 _phase_timings(nullptr),
539 _monitoring_support(nullptr),
540 _memory_pool(nullptr),
541 _stw_memory_manager("Shenandoah Pauses"),
542 _cycle_memory_manager("Shenandoah Cycles"),
543 _gc_timer(new ConcurrentGCTimer()),
544 _soft_ref_policy(),
545 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
546 _marking_context(nullptr),
547 _bitmap_size(0),
548 _bitmap_regions_per_slice(0),
549 _bitmap_bytes_per_slice(0),
550 _bitmap_region_special(false),
551 _aux_bitmap_region_special(false),
552 _liveness_cache(nullptr),
553 _collection_set(nullptr)
554 {
555 // Initialize GC mode early, many subsequent initialization procedures depend on it
556 initialize_mode();
557 _cancelled_gc.set(GCCause::_no_gc);
558 }
559
560 #ifdef _MSC_VER
561 #pragma warning( pop )
562 #endif
563
564 void ShenandoahHeap::print_on(outputStream* st) const {
565 st->print_cr("Shenandoah Heap");
566 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
567 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
568 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
569 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
570 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
571 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
572 num_regions(),
573 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
574 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
575
576 st->print("Status: ");
577 if (has_forwarded_objects()) st->print("has forwarded objects, ");
578 if (!mode()->is_generational()) {
579     if (is_concurrent_mark_in_progress()) st->print("marking, ");
580 } else {
581 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
582 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
583 }
584 if (is_evacuation_in_progress()) st->print("evacuating, ");
585 if (is_update_refs_in_progress()) st->print("updating refs, ");
586 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
587 if (is_full_gc_in_progress()) st->print("full gc, ");
588 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
589 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
590 if (is_concurrent_strong_root_in_progress() &&
591 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
592
593 if (cancelled_gc()) {
594 st->print("cancelled");
595 } else {
596 st->print("not cancelled");
597 }
598 st->cr();
599
600 st->print_cr("Reserved region:");
601 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
602 p2i(reserved_region().start()),
603                p2i(reserved_region().end()));
604
605   ShenandoahCollectionSet* cset = collection_set();
606   st->print_cr("Collection set:");
607 if (cset != nullptr) {
608 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
609 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
610 } else {
611 st->print_cr(" (null)");
612 }
613
614 st->cr();
615 MetaspaceUtils::print_on(st);
616
617 if (Verbose) {
618 st->cr();
619 print_heap_regions_on(st);
620 }
621 }
622
623 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
624 public:
625 void do_thread(Thread* thread) {
626 assert(thread != nullptr, "Sanity");
627 ShenandoahThreadLocalData::initialize_gclab(thread);
628 }
629 };
630
631 void ShenandoahHeap::post_initialize() {
632 CollectedHeap::post_initialize();
633
634 // Schedule periodic task to report on gc thread CPU utilization
635 _mmu_tracker.initialize();
636
637 MutexLocker ml(Threads_lock);
638
639 ShenandoahInitWorkerGCLABClosure init_gclabs;
640 _workers->threads_do(&init_gclabs);
641
642   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
643   // Instead, we let WorkerThreads initialize the gclab whenever a new worker is created.
644 _workers->set_initialize_gclab();
645
646 // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
647 // during a concurrent evacuation phase.
648 if (_safepoint_workers != nullptr) {
649 _safepoint_workers->threads_do(&init_gclabs);
650 _safepoint_workers->set_initialize_gclab();
651 }
652
653 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
654 }
655
656 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
657 return _global_generation->heuristics();
658 }
659
660 size_t ShenandoahHeap::used() const {
661 return global_generation()->used();
662 }
663
664 size_t ShenandoahHeap::committed() const {
665 return Atomic::load(&_committed);
666 }
667
668 void ShenandoahHeap::increase_committed(size_t bytes) {
669 shenandoah_assert_heaplocked_or_safepoint();
670 _committed += bytes;
671 }
672
673 void ShenandoahHeap::decrease_committed(size_t bytes) {
674 shenandoah_assert_heaplocked_or_safepoint();
675 _committed -= bytes;
676 }
677
678 // For tracking usage based on allocations, it should be the case that:
679 // * The sum of regions::used == heap::used
680 // * The sum of a generation's regions::used == generation::used
681 // * The sum of a generation's humongous regions::free == generation::humongous_waste
682 // These invariants are checked by the verifier on GC safepoints.
683 //
684 // Additional notes:
685 // * When a mutator's allocation request causes a region to be retired, the
686 // free memory left in that region is considered waste. It does not contribute
687 // to the usage, but it _does_ contribute to allocation rate.
688 // * The bottom of a PLAB must be aligned on card size. In some cases this will
689 // require padding in front of the PLAB (a filler object). Because this padding
690 //    is included in the region's used memory, we include the padding in the usage
691 //    accounting as waste.
692 //  * Mutator allocations are used to compute the allocation rate. They are also
693 //    reported to the pacer, which uses them for pacing decisions.
694 // * There are three sources of waste:
695 // 1. The padding used to align a PLAB on card size
696 // 2. Region's free is less than minimum TLAB size and is retired
697 // 3. The unused portion of memory in the last region of a humongous object
698 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
699 size_t actual_bytes = req.actual_size() * HeapWordSize;
700 size_t wasted_bytes = req.waste() * HeapWordSize;
701 ShenandoahGeneration* generation = generation_for(req.affiliation());
702
703 if (req.is_gc_alloc()) {
704 assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
705 increase_used(generation, actual_bytes + wasted_bytes);
706 } else {
707 assert(req.is_mutator_alloc(), "Expected mutator alloc here");
708 // padding and actual size both count towards allocation counter
709 generation->increase_allocated(actual_bytes + wasted_bytes);
710
711 // only actual size counts toward usage for mutator allocations
712 increase_used(generation, actual_bytes);
713
714 // notify pacer of both actual size and waste
715 notify_mutator_alloc_words(req.actual_size(), req.waste());
716
717 if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
718       increase_humongous_waste(generation, wasted_bytes);
719 }
720 }
721 }
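// Worked example for the accounting above (illustrative numbers): a mutator
// TLAB request with actual_size() == 64K words and waste() == 2K words (say, a
// retired region tail) adds (64K + 2K) * HeapWordSize to the generation's
// allocation counter, but only 64K * HeapWordSize to its usage; both the actual
// size and the waste reach the pacer via notify_mutator_alloc_words().
// Humongous waste is tracked separately, and only when the request itself is
// humongous.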
722
723 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
724 generation->increase_humongous_waste(bytes);
725 if (!generation->is_global()) {
726 global_generation()->increase_humongous_waste(bytes);
727 }
728 }
729
730 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
731 generation->decrease_humongous_waste(bytes);
732 if (!generation->is_global()) {
733 global_generation()->decrease_humongous_waste(bytes);
734 }
735 }
736
737 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
738 generation->increase_used(bytes);
739 if (!generation->is_global()) {
740 global_generation()->increase_used(bytes);
741 }
742 }
743
744 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
745 generation->decrease_used(bytes);
746 if (!generation->is_global()) {
747 global_generation()->decrease_used(bytes);
748 }
749 }
750
751 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
752 if (ShenandoahPacing) {
753 control_thread()->pacing_notify_alloc(words);
754 if (waste > 0) {
755 pacer()->claim_for_alloc<true>(waste);
756 }
757 }
758 }
759
760 size_t ShenandoahHeap::capacity() const {
761 return committed();
762 }
763
764 size_t ShenandoahHeap::max_capacity() const {
765 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
766 }
767
768 size_t ShenandoahHeap::soft_max_capacity() const {
769 size_t v = Atomic::load(&_soft_max_size);
770 assert(min_capacity() <= v && v <= max_capacity(),
771 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
772 min_capacity(), v, max_capacity());
773 return v;
774 }
775
776 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
777 assert(min_capacity() <= v && v <= max_capacity(),
778 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
779 min_capacity(), v, max_capacity());
780 Atomic::store(&_soft_max_size, v);
781 }
782
783 size_t ShenandoahHeap::min_capacity() const {
784 return _minimum_size;
785 }
786
787 size_t ShenandoahHeap::initial_capacity() const {
788 return _initial_size;
789 }
790
791 bool ShenandoahHeap::is_in(const void* p) const {
792 if (!is_in_reserved(p)) {
793 return false;
794 }
795
796 if (is_full_gc_move_in_progress()) {
797     // Full GC move is running; we do not have consistent region
798     // information yet. But we know the pointer is in the heap.
799 return true;
800 }
801
802 // Now check if we point to a live section in active region.
803 const ShenandoahHeapRegion* r = heap_region_containing(p);
804 if (p >= r->top()) {
805 return false;
806 }
807
808 if (r->is_active()) {
809 return true;
810 }
811
812 // The region is trash, but won't be recycled until after concurrent weak
813 // roots. We also don't allow mutators to allocate from trash regions
814 // during weak roots. Concurrent class unloading may access unmarked oops
815 // in trash regions.
816 return r->is_trash() && is_concurrent_weak_root_in_progress();
817 }
818
819 void ShenandoahHeap::notify_soft_max_changed() {
820 if (_uncommit_thread != nullptr) {
821 _uncommit_thread->notify_soft_max_changed();
822 }
823 }
824
825 void ShenandoahHeap::notify_explicit_gc_requested() {
826 if (_uncommit_thread != nullptr) {
827 _uncommit_thread->notify_explicit_gc_requested();
828 }
829 }
830
831 bool ShenandoahHeap::check_soft_max_changed() {
832 size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
833 size_t old_soft_max = soft_max_capacity();
834 if (new_soft_max != old_soft_max) {
835 new_soft_max = MAX2(min_capacity(), new_soft_max);
836 new_soft_max = MIN2(max_capacity(), new_soft_max);
837 if (new_soft_max != old_soft_max) {
838 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
839 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
840 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
841 );
842 set_soft_max_capacity(new_soft_max);
843 return true;
844 }
845 }
846 return false;
847 }
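// SoftMaxHeapSize is a manageable flag, so this path can be exercised at
// runtime; one way (assuming a recent JDK with jcmd available) is:
//
//   jcmd <pid> VM.set_flag SoftMaxHeapSize 2g
//
// The next check_soft_max_changed() call clamps the new value into
// [min_capacity(), max_capacity()] and logs the transition.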
848
849 void ShenandoahHeap::notify_heap_changed() {
850   // Update monitoring counters when we take a new region. This amortizes the
851   // update costs on the slow path.
852 monitoring_support()->notify_heap_changed();
853 _heap_changed.try_set();
854 }
855
856 void ShenandoahHeap::set_forced_counters_update(bool value) {
857 monitoring_support()->set_forced_counters_update(value);
858 }
859
860 void ShenandoahHeap::handle_force_counters_update() {
861 monitoring_support()->handle_force_counters_update();
862 }
863
864 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
865 // New object should fit the GCLAB size
866 size_t min_size = MAX2(size, PLAB::min_size());
867
868 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
869 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
870
871 new_size = MIN2(new_size, PLAB::max_size());
872 new_size = MAX2(new_size, PLAB::min_size());
873
874 // Record new heuristic value even if we take any shortcut. This captures
875 // the case when moderately-sized objects always take a shortcut. At some point,
876 // heuristics should catch up with them.
877 log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
878 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
879
880 if (new_size < size) {
881 // New size still does not fit the object. Fall back to shared allocation.
882 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
883 log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
884 return nullptr;
885 }
886
887 // Retire current GCLAB, and allocate a new one.
888 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
889 gclab->retire();
890
891 size_t actual_size = 0;
892 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
893 if (gclab_buf == nullptr) {
894 return nullptr;
895 }
896
897 assert (size <= actual_size, "allocation should fit");
898
899   // ...and clear or zap the just-allocated buffer, if needed.
900 if (ZeroTLAB) {
901 Copy::zero_to_words(gclab_buf, actual_size);
902 } else if (ZapTLAB) {
903 // Skip mangling the space corresponding to the object header to
904 // ensure that the returned space is not considered parsable by
905 // any concurrent GC thread.
906 size_t hdr_size = oopDesc::header_size();
907 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
908 }
909 gclab->set_buf(gclab_buf, actual_size);
910 return gclab->allocate(size);
911 }
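// Sizing sketch for the slow path above (illustrative numbers): a thread whose
// gclab_size is 32K words that fails to fit a 1K-word object doubles the size
// to 64K (clamped to [PLAB::min_size(), PLAB::max_size()]), retires the old
// GCLAB, allocates a fresh buffer of at least MAX2(1K, PLAB::min_size()) words,
// and satisfies the 1K allocation from it. An object larger than the doubled
// size falls back to a shared allocation, leaving the current GCLAB intact.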
912
913 // Called from stubs in JIT code or interpreter
914 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
915 size_t requested_size,
916 size_t* actual_size) {
917 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
918 HeapWord* res = allocate_memory(req);
919 if (res != nullptr) {
920 *actual_size = req.actual_size();
921 } else {
922 *actual_size = 0;
923 }
924 return res;
925 }
926
927 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
928 size_t word_size,
929 size_t* actual_size) {
930 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
931 HeapWord* res = allocate_memory(req);
932 if (res != nullptr) {
933     *actual_size = req.actual_size();
934   } else {
935 *actual_size = 0;
936 }
937 return res;
938 }
939
940 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
941 intptr_t pacer_epoch = 0;
942 bool in_new_region = false;
943 HeapWord* result = nullptr;
944
945 if (req.is_mutator_alloc()) {
946 if (ShenandoahPacing) {
947 pacer()->pace_for_alloc(req.size());
948 pacer_epoch = pacer()->epoch();
949 }
950
951 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
952 result = allocate_memory_under_lock(req, in_new_region);
953 }
954
955     // Check that the GC overhead limit has not been exceeded.
956     //
957     // Shenandoah can grind along for quite a while allocating one
958     // object at a time using shared (non-TLAB) allocations. This check
959     // catches that situation: it notifies the collector to start a cycle,
960     // but raises an OOME to the mutator if the last Full GCs have not
961     // made progress.
962 // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
963 if ((result == nullptr) && !req.is_lab_alloc() && (get_gc_no_progress_count() > ShenandoahNoProgressThreshold)) {
964 control_thread()->handle_alloc_failure(req, false);
965 req.set_actual_size(0);
966 return nullptr;
967 }
968
969 if (result == nullptr) {
970       // Block until the control thread has reacted, then retry the allocation.
971       //
972       // It might happen that a thread requesting allocation unblocks well after
973       // the GC happened, only to fail the second allocation, because other
974       // threads have already depleted the free storage. In this case, a better
975       // strategy is to try again, until at least one full GC has completed.
976       //
977       // Stop retrying and return nullptr to raise an OutOfMemoryError if our allocation failed even after:
978 // a) We experienced a GC that had good progress, or
979 // b) We experienced at least one Full GC (whether or not it had good progress)
980
981 const size_t original_count = shenandoah_policy()->full_gc_count();
982 while (result == nullptr && should_retry_allocation(original_count)) {
983 control_thread()->handle_alloc_failure(req, true);
984 result = allocate_memory_under_lock(req, in_new_region);
985 }
986 if (result != nullptr) {
987 // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
988 notify_gc_progress();
989 }
990 if (log_develop_is_enabled(Debug, gc, alloc)) {
991 ResourceMark rm;
992 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
993 ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
994 Thread::current()->name(), p2i(result), req.type_string(), req.size(),
995 original_count, get_gc_no_progress_count());
996 }
997 }
998 } else {
999 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1000 result = allocate_memory_under_lock(req, in_new_region);
1001 // Do not call handle_alloc_failure() here, because we cannot block.
1002 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1003 }
1004
1005 if (in_new_region) {
1006 notify_heap_changed();
1007 }
1008
1009 if (result == nullptr) {
1010 req.set_actual_size(0);
1011 }
1012
1013 // This is called regardless of the outcome of the allocation to account
1014 // for any waste created by retiring regions with this request.
1015 increase_used(req);
1016
1017 if (result != nullptr) {
1018 size_t requested = req.size();
1019 size_t actual = req.actual_size();
1020
1021 assert (req.is_lab_alloc() || (requested == actual),
1022 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1023 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1024
1025 if (req.is_mutator_alloc()) {
1026 // If we requested more than we were granted, give the rest back to pacer.
1027 // This only matters if we are in the same pacing epoch: do not try to unpace
1028 // over the budget for the other phase.
1029 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1030 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1031 }
1032 }
1033 }
1034
1035 return result;
1036 }
1037
1038 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
1039 return shenandoah_policy()->full_gc_count() == original_full_gc_count
1040 && !shenandoah_policy()->is_at_shutdown();
1041 }
1042
1043 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1044 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1045 // We cannot block for safepoint for GC allocations, because there is a high chance
1046 // we are already running at safepoint or from stack watermark machinery, and we cannot
1047 // block again.
1048 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1049
1050 // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1051 if (req.is_old() && !old_generation()->can_allocate(req)) {
1052 return nullptr;
1053 }
1054
1055   // If the TLAB request is larger than the available memory, allocate() will
1056   // attempt to downsize the request to fit within the available memory.
1057 HeapWord* result = _free_set->allocate(req, in_new_region);
1058
1059 // Record the plab configuration for this result and register the object.
1060 if (result != nullptr && req.is_old()) {
1061 old_generation()->configure_plab_for_current_thread(req);
1062 if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1063 // Register the newly allocated object while we're holding the global lock since there's no synchronization
1064 // built in to the implementation of register_object(). There are potential races when multiple independent
1065 // threads are allocating objects, some of which might span the same card region. For example, consider
1066 // a card table's memory region within which three objects are being allocated by three different threads:
1067 //
1068 // objects being "concurrently" allocated:
1069 // [-----a------][-----b-----][--------------c------------------]
1070 // [---- card table memory range --------------]
1071 //
1072 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1073 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1074 // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1075 // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1076 // card region.
1077 //
1078 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1079 // last-start representing object b while first-start represents object c. This is why we need to require all
1080 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1081 old_generation()->card_scan()->register_object(result);
1082 }
1083 }
1084
1085 return result;
1086 }
1087
1088 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1089 bool* gc_overhead_limit_was_exceeded) {
1090 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1091 return allocate_memory(req);
1092 }
1093
1094 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1095 size_t size,
1096 Metaspace::MetadataType mdtype) {
1097 MetaWord* result;
1098
1099 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1100 ShenandoahHeuristics* h = global_generation()->heuristics();
1101 if (h->can_unload_classes()) {
1102 h->record_metaspace_oom();
1103 }
1104
1105 // Expand and retry allocation
1106 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1107 if (result != nullptr) {
1108 return result;
1109 }
1110
1111 // Start full GC
1112 collect(GCCause::_metadata_GC_clear_soft_refs);
1113
1114 // Retry allocation
1115 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1116 if (result != nullptr) {
1117 return result;
1118 }
1119
1120 // Expand and retry allocation
1121   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1122   if (result != nullptr) {
1123     return result;
1124   }
1125
1126   // Out of memory
1127   return nullptr;
1128 }
1129
1130 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1131 private:
1132   ShenandoahHeap* const _heap;
1133   Thread* const _thread;
1134 public:
1135   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1136     _heap(heap), _thread(Thread::current()) {}
1137
1138   void do_object(oop p) {
1139     shenandoah_assert_marked(nullptr, p);
1140     if (!p->is_forwarded()) {
1141       _heap->evacuate_object(p, _thread);
1142     }
1143   }
1144 };
1145
1146 class ShenandoahEvacuationTask : public WorkerTask {
1147 private:
1148   ShenandoahHeap* const _sh;
1149   ShenandoahCollectionSet* const _cs;
1150   bool _concurrent;
1151 public:
1152   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1153                            ShenandoahCollectionSet* cs,
1154                            bool concurrent) :
1155     WorkerTask("Shenandoah Evacuation"),
1156     _sh(sh),
1157     _cs(cs),
1158     _concurrent(concurrent)
1159   {}
1160
1161   void work(uint worker_id) {
1162     if (_concurrent) {
1163       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1164       ShenandoahSuspendibleThreadSetJoiner stsj;
1165       ShenandoahEvacOOMScope oom_evac_scope;
1166       do_work();
1167     } else {
1168       ShenandoahParallelWorkerSession worker_session(worker_id);
1169       ShenandoahEvacOOMScope oom_evac_scope;
1170       do_work();
1171     }
1172   }
1173
1174 private:
1175 void do_work() {
1176 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1177 ShenandoahHeapRegion* r;
1178 while ((r =_cs->claim_next()) != nullptr) {
1179 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1180 _sh->marked_object_iterate(r, &cl);
1181
1182 if (ShenandoahPacing) {
1183 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1184 }
1185
1186 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1187 break;
1188 }
1189 }
1190 }
1191 };
1192
1193 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1194 private:
1195 bool const _resize;
1196 public:
1197 explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1198 void do_thread(Thread* thread) override {
1199 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1200 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1201 gclab->retire();
1202 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1203 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1204 }
1205
1206 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1207 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1208 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1209
1210 // There are two reasons to retire all plabs between old-gen evacuation passes.
1211 // 1. We need to make the plab memory parsable by remembered-set scanning.
1212 // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1213 ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1214
1215 // Re-enable promotions for the next evacuation phase.
1216 ShenandoahThreadLocalData::enable_plab_promotions(thread);
1217
1218 // Reset the PLAB size for the next evacuation phase.
1219 if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1220 ShenandoahThreadLocalData::set_plab_size(thread, 0);
1221 }
1222 }
1223 }
1224 };
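// Note: resetting the recorded gclab/plab size to 0 above does not free memory; it
// clears the thread-local sizing hint so adaptive sizing starts fresh on the next
// LAB allocation. A sketch of the typical use, mirroring gclabs_retire() later in
// this file:
//
//   ShenandoahRetireGCLABClosure cl(true /* resize */);
//   for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
//     cl.do_thread(t);           // retire each mutator's GCLAB (and PLAB if generational)
//   }
//   workers()->threads_do(&cl);  // GC worker threads carry LABs as well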
1225
1226 class ShenandoahGCStatePropagator : public HandshakeClosure {
1227 public:
1228 explicit ShenandoahGCStatePropagator(char gc_state) :
1229 HandshakeClosure("Shenandoah GC State Change"),
1230 _gc_state(gc_state) {}
1231
1232 void do_thread(Thread* thread) override {
1233 ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1234 }
1235 private:
1236 char _gc_state;
1237 };
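// Note: the propagator is deliberately trivial so the same closure can run through
// either delivery path: a direct iteration for threads that do not participate in
// handshakes, or a handshake for Java threads. Both paths appear later in this file,
// roughly:
//
//   ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
//   Threads::non_java_threads_do(&propagator);  // workers, control thread, VM thread
//   Handshake::execute(&propagator);            // each Java thread at a safepoint poll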
1238
1239 class ShenandoahPrepareForUpdateRefs : public HandshakeClosure {
1240 public:
1241 explicit ShenandoahPrepareForUpdateRefs(char gc_state) :
1242 HandshakeClosure("Shenandoah Prepare for Update Refs"),
1243 _retire(ResizeTLAB), _propagator(gc_state) {}
1244
1245 void do_thread(Thread* thread) override {
1246 _propagator.do_thread(thread);
1247 if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1248 _retire.do_thread(thread);
1249 }
1250 }
1251 private:
1252 ShenandoahRetireGCLABClosure _retire;
1253 ShenandoahGCStatePropagator _propagator;
1254 };
1255
1256 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1257 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1258 workers()->run_task(&task);
1259 }
1260
1261 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1262 {
1263 // Java threads take this lock while they are being attached and added to the list of threads.
1264 // If another thread acquires this lock before we update the gc state, it may observe a stale
1265 // gc state, but it will already have been added to the list of Java threads and so will be
1266 // corrected by the following handshake.
1267 MutexLocker lock(Threads_lock);
1268
1269 // A cancellation at this point means the degenerated cycle must resume from update-refs.
1270 set_gc_state_concurrent(EVACUATION, false);
1271 set_gc_state_concurrent(WEAK_ROOTS, false);
1272 set_gc_state_concurrent(UPDATE_REFS, true);
1273 }
1274
1275 // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1276 ShenandoahPrepareForUpdateRefs prepare_for_update_refs(_gc_state.raw_value());
1277
1278 // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1279 Threads::non_java_threads_do(&prepare_for_update_refs);
1280
1281 // Now retire gclabs and plabs and propagate gc_state for mutator threads
1282 Handshake::execute(&prepare_for_update_refs);
1283
1284 _update_refs_iterator.reset();
1285 }
1286
1287 class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
1288 HandshakeClosure* _handshake_1;
1289 HandshakeClosure* _handshake_2;
1290 public:
1291 ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
1292 HandshakeClosure(handshake_2->name()),
1293 _handshake_1(handshake_1), _handshake_2(handshake_2) {}
1294
1295 void do_thread(Thread* thread) override {
1296 _handshake_1->do_thread(thread);
1297 _handshake_2->do_thread(thread);
1298 }
1299 };
1300
1301 void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
1302 {
1303 assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
1304 MutexLocker lock(Threads_lock);
1305 set_gc_state_concurrent(WEAK_ROOTS, false);
1306 }
1307
1308 ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
1309 Threads::non_java_threads_do(&propagator);
1310 if (handshake_closure == nullptr) {
1311 Handshake::execute(&propagator);
1312 } else {
1313 ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
1314 Handshake::execute(&composite);
1315 }
1316 }
1317
1318 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1319 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1320 if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1321 // This thread went through the OOM during evac protocol. It is safe to return
1322 // the forward pointer. It must not attempt to evacuate any other objects.
1323 return ShenandoahBarrierSet::resolve_forwarded(p);
1324 }
1325
1326 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1327
1328 ShenandoahHeapRegion* r = heap_region_containing(p);
1329 assert(!r->is_humongous(), "never evacuate humongous objects");
1330
1331 ShenandoahAffiliation target_gen = r->affiliation();
1332 return try_evacuate_object(p, thread, r, target_gen);
1333 }
1334
1335 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1336 ShenandoahAffiliation target_gen) {
1337 assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1338 assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1339 bool alloc_from_lab = true;
1340 HeapWord* copy = nullptr;
1341 size_t size = p->size();
1342
1343 #ifdef ASSERT
1344 if (ShenandoahOOMDuringEvacALot &&
1345 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1346 copy = nullptr;
1347 } else {
1348 #endif
1349 if (UseTLAB) {
1350 copy = allocate_from_gclab(thread, size);
1351 }
1352 if (copy == nullptr) {
1353 // If we failed to allocate in LAB, we'll try a shared allocation.
1354 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1355 copy = allocate_memory(req);
1356 alloc_from_lab = false;
1357 }
1358 #ifdef ASSERT
1359 }
1360 #endif
1361
1362 if (copy == nullptr) {
1363 control_thread()->handle_alloc_failure_evac(size);
1364
1365 _oom_evac_handler.handle_out_of_memory_during_evacuation();
1366
1367 return ShenandoahBarrierSet::resolve_forwarded(p);
1368 }
1369
1370 // Copy the object:
1371 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1372
1373 // Try to install the new forwarding pointer.
1374 oop copy_val = cast_to_oop(copy);
1375 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1376 if (result == copy_val) {
1377 // Successfully evacuated. Our copy is now the public one!
1378 ContinuationGCSupport::relativize_stack_chunk(copy_val);
1379 shenandoah_assert_correct(nullptr, copy_val);
1380 return copy_val;
1381 } else {
1382 // Failed to evacuate. We need to deal with the object that is left behind. Since this
1383 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1384 // But if it happens to contain references to evacuated regions, those references would
1385 // not get updated for this stale copy during this cycle, and we will crash while scanning
1386 // it the next cycle.
1387 if (alloc_from_lab) {
1388 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1389 // object will overwrite this stale copy, or the filler object on LAB retirement will
1390 // do this.
1391 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1392 } else {
1393 // For non-LAB allocations, we have no way to retract the allocation, and
1394 // have to explicitly overwrite the copy with the filler object. With that overwrite,
1395 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1396 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1397 fill_with_object(copy, size);
1398 shenandoah_assert_correct(nullptr, copy_val);
1399 // For non-LAB allocations, the object has already been registered
1400 }
1401 shenandoah_assert_correct(nullptr, result);
1402 return result;
1403 }
1404 }
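// Note: a walkthrough of the forwarding race handled above, for two threads that
// evacuate the same object p concurrently (thread names are illustrative):
//
//   T1: copy1 = allocate(size); copy p's payload into copy1
//   T2: copy2 = allocate(size); copy p's payload into copy2
//   T1: try_update_forwardee(p, copy1) -> copy1   // T1 wins; copy1 is the public copy
//   T2: try_update_forwardee(p, copy2) -> copy1   // T2 loses; must neutralize copy2
//   T2: undo_allocation(copy2, size)              // LAB case, or fill_with_object(...)
//
// Both threads continue with the same winner (copy1), so all references to p resolve
// to a single forwardee no matter which thread performed the copy.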
1405
1406 void ShenandoahHeap::trash_cset_regions() {
1407 ShenandoahHeapLocker locker(lock());
1408
1409 ShenandoahCollectionSet* set = collection_set();
1410 ShenandoahHeapRegion* r;
1411 set->clear_current_index();
1412 while ((r = set->next()) != nullptr) {
1413 r->make_trash();
1414 }
1415 collection_set()->clear();
1416 }
1417
1418 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1419 st->print_cr("Heap Regions:");
1420 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1421 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1422 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1423 st->print_cr("UWM=update watermark, U=used");
1424 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1425 st->print_cr("S=shared allocs, L=live data");
1439 ShenandoahHeapRegion* region = start;
1440 size_t index = region->index();
1441 do {
1442 assert(region->is_humongous(), "Expect correct humongous start or continuation");
1443 assert(!region->is_cset(), "Humongous region should not be in collection set");
1444 region->make_trash_immediate();
1445 region = get_region(++index);
1446 } while (region != nullptr && region->is_humongous_continuation());
1447
1448 // Return number of regions trashed
1449 return index - start->index();
1450 }
1451
1452 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1453 public:
1454 ShenandoahCheckCleanGCLABClosure() {}
1455 void do_thread(Thread* thread) {
1456 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1457 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1458 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1459
1460 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1461 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1462 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1463 assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1464 }
1465 }
1466 };
1467
1468 void ShenandoahHeap::labs_make_parsable() {
1469 assert(UseTLAB, "Only call with UseTLAB");
1470
1471 ShenandoahRetireGCLABClosure cl(false);
1472
1473 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1474 ThreadLocalAllocBuffer& tlab = t->tlab();
1475 tlab.make_parsable();
1476 cl.do_thread(t);
1477 }
1478
1479 workers()->threads_do(&cl);
1480
1481 if (safepoint_workers() != nullptr) {
1482 safepoint_workers()->threads_do(&cl);
1483 }
1484 }
1485
1486 void ShenandoahHeap::tlabs_retire(bool resize) {
1487 assert(UseTLAB, "Only call with UseTLAB");
1488 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1489
1490 ThreadLocalAllocStats stats;
1491
1492 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1493 ThreadLocalAllocBuffer& tlab = t->tlab();
1494 tlab.retire(&stats);
1495 if (resize) {
1496 tlab.resize();
1497 }
1498 }
1499
1500 stats.publish();
1501
1502 #ifdef ASSERT
1503 ShenandoahCheckCleanGCLABClosure cl;
1504 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1505 cl.do_thread(t);
1506 }
1507 workers()->threads_do(&cl);
1508 #endif
1509 }
1510
1511 void ShenandoahHeap::gclabs_retire(bool resize) {
1512 assert(UseTLAB, "Only call with UseTLAB");
1513 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1514
1515 ShenandoahRetireGCLABClosure cl(resize);
1516 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1517 cl.do_thread(t);
1518 }
1519
1520 workers()->threads_do(&cl);
1521
1522 if (safepoint_workers() != nullptr) {
1523 safepoint_workers()->threads_do(&cl);
1524 }
1525 }
1526
1527 // Returns size in bytes
1528 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1529 // Return the max allowed size, and let the allocation path
1530 // figure out the safe size for current allocation.
1531 return ShenandoahHeapRegion::max_tlab_size_bytes();
1532 }
1533
1534 size_t ShenandoahHeap::max_tlab_size() const {
1535 // Returns size in words
1536 return ShenandoahHeapRegion::max_tlab_size_words();
1537 }
1538
1539 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1563 }
1564 return nullptr;
1565 }
1566
1567 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1568 ShenandoahHeapRegion* r = heap_region_containing(addr);
1569 return r->block_is_obj(addr);
1570 }
1571
1572 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1573 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1574 }
1575
1576 void ShenandoahHeap::prepare_for_verify() {
1577 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1578 labs_make_parsable();
1579 }
1580 }
1581
1582 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1583 if (_shenandoah_policy->is_at_shutdown()) {
1584 return;
1585 }
1586
1587 if (_control_thread != nullptr) {
1588 tcl->do_thread(_control_thread);
1589 }
1590
1591 if (_uncommit_thread != nullptr) {
1592 tcl->do_thread(_uncommit_thread);
1593 }
1594
1595 workers()->threads_do(tcl);
1596 if (_safepoint_workers != nullptr) {
1597 _safepoint_workers->threads_do(tcl);
1598 }
1599 }
1600
1601 void ShenandoahHeap::print_tracing_info() const {
1602 LogTarget(Info, gc, stats) lt;
1603 if (lt.is_enabled()) {
1604 ResourceMark rm;
1605 LogStream ls(lt);
1606
1607 phase_timings()->print_global_on(&ls);
1608
1609 ls.cr();
1610 ls.cr();
1611
1612 shenandoah_policy()->print_gc_stats(&ls);
1613
1614 ls.cr();
1615 ls.cr();
1616 }
1617 }
1618
1619 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1620 shenandoah_assert_control_or_vm_thread_at_safepoint();
1621 _gc_generation = generation;
1622 }
1623
1624 // Active generation may only be set by the VM thread at a safepoint.
1625 void ShenandoahHeap::set_active_generation() {
1626 assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1627 assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1628 assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1629 _active_generation = _gc_generation;
1630 }
1631
1632 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1633 shenandoah_policy()->record_collection_cause(cause);
1634
1635 const GCCause::Cause current = gc_cause();
1636 assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1637 GCCause::to_string(current), GCCause::to_string(cause));
1638 assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1639
1640 set_gc_cause(cause);
1641 set_gc_generation(generation);
1642
1643 generation->heuristics()->record_cycle_start();
1644 }
1645
1646 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1647 assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1648 assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1649
1650 generation->heuristics()->record_cycle_end();
1651 if (mode()->is_generational() && generation->is_global()) {
1652 // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1653 young_generation()->heuristics()->record_cycle_end();
1654 old_generation()->heuristics()->record_cycle_end();
1655 }
1656
1657 set_gc_generation(nullptr);
1658 set_gc_cause(GCCause::_no_gc);
1659 }
1660
1661 void ShenandoahHeap::verify(VerifyOption vo) {
1662 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1663 if (ShenandoahVerify) {
1664 verifier()->verify_generic(vo);
1665 } else {
1666 // TODO: Consider allocating verification bitmaps on demand,
1667 // and turn this on unconditionally.
1668 }
1669 }
1670 }
1671 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1672 return _free_set->capacity();
1673 }
1674
1675 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1676 private:
1677 MarkBitMap* _bitmap;
1678 ShenandoahScanObjectStack* _oop_stack;
1679 ShenandoahHeap* const _heap;
1680 ShenandoahMarkingContext* const _marking_context;
1975 const uint active_workers = workers()->active_workers();
1976 const size_t n_regions = num_regions();
1977 size_t stride = ShenandoahParallelRegionStride;
1978 if (stride == 0 && active_workers > 1) {
1979 // Automatically derive the stride to balance the work between threads
1980 // evenly. Do not try to split work if below the reasonable threshold.
1981 constexpr size_t threshold = 4096;
1982 stride = n_regions <= threshold ?
1983 threshold :
1984 (n_regions + active_workers - 1) / active_workers;
1985 }
1986
1987 if (n_regions > stride && active_workers > 1) {
1988 ShenandoahParallelHeapRegionTask task(blk, stride);
1989 workers()->run_task(&task);
1990 } else {
1991 heap_region_iterate(blk);
1992 }
1993 }
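// Note: a worked example of the stride heuristic above, assuming
// ShenandoahParallelRegionStride == 0 (auto). With active_workers == 8 and
// n_regions == 8192, the count exceeds the 4096-region threshold, so
// stride = (8192 + 8 - 1) / 8 = 1024 and the parallel task gives each worker about
// one stride of regions. With n_regions == 2048 the stride stays at 4096,
// n_regions <= stride, and the iteration runs single-threaded.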
1994
1995 class ShenandoahRendezvousClosure : public HandshakeClosure {
1996 public:
1997 inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1998 inline void do_thread(Thread* thread) {}
1999 };
2000
2001 void ShenandoahHeap::rendezvous_threads(const char* name) {
2002 ShenandoahRendezvousClosure cl(name);
2003 Handshake::execute(&cl);
2004 }
2005
2006 void ShenandoahHeap::recycle_trash() {
2007 free_set()->recycle_trash();
2008 }
2009
2010 void ShenandoahHeap::do_class_unloading() {
2011 _unloader.unload();
2012 if (mode()->is_generational()) {
2013 old_generation()->set_parsable(false);
2014 }
2015 }
2016
2017 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2018 // Weak refs processing
2019 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2020 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2021 ShenandoahTimingsTracker t(phase);
2022 ShenandoahGCWorkerPhase worker_phase(phase);
2023 shenandoah_assert_generations_reconciled();
2024 gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2025 }
2026
2027 void ShenandoahHeap::prepare_update_heap_references() {
2028 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2029
2030 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2031 // make them parsable for update code to work correctly. Plus, we can compute new sizes
2032 // for future GCLABs here.
2033 if (UseTLAB) {
2034 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2035 gclabs_retire(ResizeTLAB);
2036 }
2037
2038 _update_refs_iterator.reset();
2039 }
2040
2041 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2042 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2043 if (_gc_state_changed) {
2044 ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
2045 Threads::threads_do(&propagator);
2046 _gc_state_changed = false;
2047 }
2048 }
2049
2050 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2051 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2052 _gc_state.set_cond(mask, value);
2053 _gc_state_changed = true;
2054 }
2055
2056 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
2057 // Holding the thread lock here assures that any thread created after we change the gc
2058 // state will have the correct state. It also prevents attaching threads from seeing
2059 // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
2060 // threads will use their thread local copy of the gc state (changed by a handshake, or on a
2061 // safepoint).
2062 assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
2063 _gc_state.set_cond(mask, value);
2064 }
2065
2066 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2067 uint mask;
2068 assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2069 if (!in_progress && is_concurrent_old_mark_in_progress()) {
2070 assert(mode()->is_generational(), "Only generational GC has old marking");
2071 assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2072 // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2073 mask = YOUNG_MARKING;
2074 } else {
2075 mask = MARKING | YOUNG_MARKING;
2076 }
2077 set_gc_state_at_safepoint(mask, in_progress);
2078 manage_satb_barrier(in_progress);
2079 }
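// Note: because the gc state is a bitmask, these transitions compose. Finishing young
// marking while old marking is still running clears only YOUNG_MARKING, leaving
// MARKING | OLD_MARKING set; when no old marking is running, MARKING | YOUNG_MARKING
// are cleared together, so a single set_gc_state_at_safepoint() call takes the heap
// out of the marking phase.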
2080
2081 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2082 #ifdef ASSERT
2083 // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2084 bool has_forwarded = has_forwarded_objects();
2085 bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2086 bool evacuating = _gc_state.is_set(EVACUATION);
2087 assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2088 "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2089 #endif
2090 if (!in_progress && is_concurrent_young_mark_in_progress()) {
2091 // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2092 assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2093 set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2094 } else {
2095 set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2096 }
2097 manage_satb_barrier(in_progress);
2098 }
2099
2100 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2101 return old_generation()->is_preparing_for_mark();
2102 }
2103
2104 void ShenandoahHeap::manage_satb_barrier(bool active) {
2105 if (is_concurrent_mark_in_progress()) {
2106 // Ignore request to deactivate barrier while concurrent mark is in progress.
2107 // Do not attempt to re-activate the barrier if it is already active.
2108 if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2109 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2110 }
2111 } else {
2112 // No concurrent marking is in progress so honor request to deactivate,
2113 // but only if the barrier is already active.
2114 if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2115 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2116 }
2117 }
2118 }
2119
2120 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2121 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2122 set_gc_state_at_safepoint(EVACUATION, in_progress);
2123 }
2124
2125 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2126 if (in_progress) {
2127 _concurrent_strong_root_in_progress.set();
2128 } else {
2129 _concurrent_strong_root_in_progress.unset();
2130 }
2131 }
2132
2133 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2134 set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2135 }
2136
2137 GCTracer* ShenandoahHeap::tracer() {
2138 return shenandoah_policy()->tracer();
2139 }
2140
2141 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2142 return _free_set->used();
2143 }
2144
2145 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
2146 const GCCause::Cause prev = _cancelled_gc.xchg(cause);
2147 return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
2148 }
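// Note: the xchg above makes cancellation "first writer wins": the new cause is always
// stored, but the request is honored only when nothing was pending (prev == _no_gc) or
// the pending cause was the concurrent GC's own (_shenandoah_concurrent_gc), which is
// allowed to be superseded, e.g. by an allocation failure that must degenerate the cycle.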
2149
2150 void ShenandoahHeap::cancel_concurrent_mark() {
2151 if (mode()->is_generational()) {
2152 young_generation()->cancel_marking();
2153 old_generation()->cancel_marking();
2154 }
2155
2156 global_generation()->cancel_marking();
2157
2158 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2159 }
2160
2161 bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2162 if (try_cancel_gc(cause)) {
2163 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2164 log_info(gc,thread)("%s", msg.buffer());
2165 Events::log(Thread::current(), "%s", msg.buffer());
2166 _cancel_requested_time = os::elapsedTime();
2167 return true;
2168 }
2169 return false;
2170 }
2171
2172 uint ShenandoahHeap::max_workers() {
2173 return _max_workers;
2174 }
2175
2176 void ShenandoahHeap::stop() {
2177 // The shutdown sequence should be able to terminate when GC is running.
2178
2179 // Step 0. Notify policy to disable event recording.
2180 _shenandoah_policy->record_shutdown();
2181
2182 // Step 1. Stop reporting on gc thread cpu utilization
2183 mmu_tracker()->stop();
2184
2185 // Step 2. Wait until the GC control thread exits normally (this will cancel any ongoing GC).
2186 control_thread()->stop();
2187
2188 // Step 3. Shut down the uncommit thread.
2189 if (_uncommit_thread != nullptr) {
2190 _uncommit_thread->stop();
2191 }
2192 }
2193
2194 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2195 if (!unload_classes()) return;
2196 ClassUnloadingContext ctx(_workers->active_workers(),
2197 true /* unregister_nmethods_during_purge */,
2198 false /* lock_codeblob_free_separately */);
2199
2200 // Unload classes and purge SystemDictionary.
2201 {
2202 ShenandoahPhaseTimings::Phase phase = full_gc ?
2203 ShenandoahPhaseTimings::full_gc_purge_class_unload :
2204 ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2205 ShenandoahIsAliveSelector is_alive;
2206 {
2207 CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2208 ShenandoahGCPhase gc_phase(phase);
2209 ShenandoahGCWorkerPhase worker_phase(phase);
2210 bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2211
2212 uint num_workers = _workers->active_workers();
2213 ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2214 _workers->run_task(&unlink_task);
2215 }
2216 // Release unloaded nmethods' memory.
2217 ClassUnloadingContext::context()->purge_and_free_nmethods();
2218 }
2219
2220 {
2221 ShenandoahGCPhase phase(full_gc ?
2222 ShenandoahPhaseTimings::full_gc_purge_cldg :
2223 ShenandoahPhaseTimings::degen_gc_purge_cldg);
2224 ClassLoaderDataGraph::purge(true /* at_safepoint */);
2225 }
2226 // Resize and verify metaspace
2227 MetaspaceGC::compute_new_size();
2228 DEBUG_ONLY(MetaspaceUtils::verify();)
2229 }
2230
2231 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2232 // so they should not have forwarded oops.
2233 // However, we do need to "null" dead oops in the roots, if that cannot be done
2234 // in concurrent cycles.
2235 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2236 uint num_workers = _workers->active_workers();
2237 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2238 ShenandoahPhaseTimings::full_gc_purge_weak_par :
2239 ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2240 ShenandoahGCPhase phase(timing_phase);
2241 ShenandoahGCWorkerPhase worker_phase(timing_phase);
2242 // Cleanup weak roots
2243 if (has_forwarded_objects()) {
2244 ShenandoahForwardedIsAliveClosure is_alive;
2245 ShenandoahUpdateRefsClosure keep_alive;
2246 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2247 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2248 _workers->run_task(&cleaning_task);
2249 } else {
2250 ShenandoahIsAliveClosure is_alive;
2251 #ifdef ASSERT
2255 #else
2256 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2257 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2258 #endif
2259 _workers->run_task(&cleaning_task);
2260 }
2261 }
2262
2263 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2264 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2265 assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2266 ShenandoahGCPhase phase(full_gc ?
2267 ShenandoahPhaseTimings::full_gc_purge :
2268 ShenandoahPhaseTimings::degen_gc_purge);
2269 stw_weak_refs(full_gc);
2270 stw_process_weak_roots(full_gc);
2271 stw_unload_classes(full_gc);
2272 }
2273
2274 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2275 set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2276 }
2277
2278 void ShenandoahHeap::set_unload_classes(bool uc) {
2279 _unload_classes.set_cond(uc);
2280 }
2281
2282 bool ShenandoahHeap::unload_classes() const {
2283 return _unload_classes.is_set();
2284 }
2285
2286 address ShenandoahHeap::in_cset_fast_test_addr() {
2287 ShenandoahHeap* heap = ShenandoahHeap::heap();
2288 assert(heap->collection_set() != nullptr, "Sanity");
2289 return (address) heap->collection_set()->biased_map_address();
2290 }
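// Note: a hedged sketch of how this address is used. The collection set keeps one byte
// per region, and the map base is pre-biased by the heap start so compiled barriers can
// test membership with a shift and a byte load (names are illustrative):
//
//   bool in_cset = biased_cset_map[(uintptr_t)obj >> region_size_bytes_shift];
//
// The JIT embeds the address returned here, avoiding a load of the region object on the
// barrier fast path.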
2291
2292 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2293 if (mode()->is_generational()) {
2294 young_generation()->reset_bytes_allocated_since_gc_start();
2295 old_generation()->reset_bytes_allocated_since_gc_start();
2296 }
2297
2298 global_generation()->reset_bytes_allocated_since_gc_start();
2299 }
2300
2301 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2302 _degenerated_gc_in_progress.set_cond(in_progress);
2303 }
2304
2305 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2306 _full_gc_in_progress.set_cond(in_progress);
2307 }
2308
2309 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2310 assert (is_full_gc_in_progress(), "should be");
2311 _full_gc_move_in_progress.set_cond(in_progress);
2312 }
2313
2314 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2315 set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2316 }
2317
2318 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2319 ShenandoahCodeRoots::register_nmethod(nm);
2320 }
2321
2322 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2323 ShenandoahCodeRoots::unregister_nmethod(nm);
2324 }
2325
2326 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2327 heap_region_containing(o)->record_pin();
2328 }
2329
2330 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2331 ShenandoahHeapRegion* r = heap_region_containing(o);
2332 assert(r != nullptr, "Sanity");
2333 assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2334 r->record_unpin();
2335 }
2342 if (r->is_active()) {
2343 if (r->is_pinned()) {
2344 if (r->pin_count() == 0) {
2345 r->make_unpinned();
2346 }
2347 } else {
2348 if (r->pin_count() > 0) {
2349 r->make_pinned();
2350 }
2351 }
2352 }
2353 }
2354
2355 assert_pinned_region_status();
2356 }
2357
2358 #ifdef ASSERT
2359 void ShenandoahHeap::assert_pinned_region_status() {
2360 for (size_t i = 0; i < num_regions(); i++) {
2361 ShenandoahHeapRegion* r = get_region(i);
2362 shenandoah_assert_generations_reconciled();
2363 if (gc_generation()->contains(r)) {
2364 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2365 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2366 }
2367 }
2368 }
2369 #endif
2370
2371 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2372 return _gc_timer;
2373 }
2374
2375 void ShenandoahHeap::prepare_concurrent_roots() {
2376 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2377 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2378 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2379 set_concurrent_weak_root_in_progress(true);
2380 if (unload_classes()) {
2381 _unloader.prepare();
2382 }
2383 }
2384
2385 void ShenandoahHeap::finish_concurrent_roots() {
2386 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2387 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2388 if (unload_classes()) {
2389 _unloader.finish();
2390 }
2391 }
2392
2393 #ifdef ASSERT
2394 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2395 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2396
2397 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2398 // Use ParallelGCThreads inside safepoints
2399 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2400 ParallelGCThreads, nworkers);
2401 } else {
2402 // Use ConcGCThreads outside safepoints
2403 assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2404 ConcGCThreads, nworkers);
2405 }
2406 }
2407 #endif
2408
2409 ShenandoahVerifier* ShenandoahHeap::verifier() {
2410 guarantee(ShenandoahVerify, "Should be enabled");
2411 assert (_verifier != nullptr, "sanity");
2412 return _verifier;
2413 }
2414
2415 template<bool CONCURRENT>
2416 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2417 private:
2418 ShenandoahHeap* _heap;
2419 ShenandoahRegionIterator* _regions;
2420 public:
2421 explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2422 WorkerTask("Shenandoah Update References"),
2423 _heap(ShenandoahHeap::heap()),
2424 _regions(regions) {
2425 }
2426
2427 void work(uint worker_id) {
2428 if (CONCURRENT) {
2429 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2430 ShenandoahSuspendibleThreadSetJoiner stsj;
2431 do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2432 } else {
2433 ShenandoahParallelWorkerSession worker_session(worker_id);
2434 do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2435 }
2436 }
2437
2438 private:
2439 template<class T>
2440 void do_work(uint worker_id) {
2441 if (CONCURRENT && (worker_id == 0)) {
2442 // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2443 // results of evacuation. These reserves are no longer necessary because evacuation has completed.
2444 size_t cset_regions = _heap->collection_set()->count();
2445
2446 // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2447 // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2448 // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2449 // next GC cycle.
2450 _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2451 }
2452 // If !CONCURRENT, there's no value in expanding Mutator free set
2453 T cl;
2454 ShenandoahHeapRegion* r = _regions->next();
2455 while (r != nullptr) {
2456 HeapWord* update_watermark = r->get_update_watermark();
2457 assert (update_watermark >= r->bottom(), "sanity");
2458 if (r->is_active() && !r->is_cset()) {
2459 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2460 if (ShenandoahPacing) {
2461 _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
2462 }
2463 }
2464 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2465 return;
2466 }
2467 r = _regions->next();
2468 }
2469 }
2470 };
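// Note: the template parameter gives two instantiations of one task body. The
// CONCURRENT=true form joins the suspendible thread set so workers can pause at
// safepoint synchronization, while the CONCURRENT=false form runs inside a safepoint
// and skips that protocol. Worker 0's one-time transfer of reserved regions back to
// the mutator free set happens only in the concurrent case, where mutators are still
// allocating and can use those regions immediately.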
2471
2472 void ShenandoahHeap::update_heap_references(bool concurrent) {
2473 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2474
2475 if (concurrent) {
2476 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2477 workers()->run_task(&task);
2478 } else {
2479 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2480 workers()->run_task(&task);
2481 }
2482 }
2483
2484 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2485 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2486 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2487
2488 {
2489 ShenandoahGCPhase phase(concurrent ?
2490 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2491 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2492
2493 final_update_refs_update_region_states();
2494
2495 assert_pinned_region_status();
2496 }
2497
2498 {
2499 ShenandoahGCPhase phase(concurrent ?
2500 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2501 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2502 trash_cset_regions();
2503 }
2504 }
2505
2506 void ShenandoahHeap::final_update_refs_update_region_states() {
2507 ShenandoahSynchronizePinnedRegionStates cl;
2508 parallel_heap_region_iterate(&cl);
2509 }
2510
2511 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2512 ShenandoahGCPhase phase(concurrent ?
2513 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2514 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2515 ShenandoahHeapLocker locker(lock());
2516 size_t young_cset_regions, old_cset_regions;
2517 size_t first_old_region, last_old_region, old_region_count;
2518 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2519 // If there are no old regions, first_old_region will be greater than last_old_region
2520 assert((first_old_region > last_old_region) ||
2521 ((last_old_region + 1 - first_old_region >= old_region_count) &&
2522 get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2523 "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2524 old_region_count, first_old_region, last_old_region);
2525
2526 if (mode()->is_generational()) {
2527 #ifdef ASSERT
2528 if (ShenandoahVerify) {
2529 verifier()->verify_before_rebuilding_free_set();
2530 }
2531 #endif
2532
2533 // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so consider all of this
2534 // available for transfer to old. Note that transfer of humongous regions does not impact available.
2535 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2536 size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2537 gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2538
2539 // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
2540 // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
2541 // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
2542 // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2543 //
2544 // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2545 // within partially consumed regions of memory.
2546 }
2547 // Rebuild free set based on adjusted generation sizes.
2548 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2549
2550 if (mode()->is_generational()) {
2551 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2552 ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2553 old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2554 }
2555 }
2556
2557 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2558 print_on(st);
2559 st->cr();
2560 print_heap_regions_on(st);
2561 }
2562
2563 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2564 size_t slice = r->index() / _bitmap_regions_per_slice;
2565
2566 size_t regions_from = _bitmap_regions_per_slice * slice;
2567 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2568 for (size_t g = regions_from; g < regions_to; g++) {
2569 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2570 if (skip_self && g == r->index()) continue;
2571 if (get_region(g)->is_committed()) {
2572 return true;
2573 }
2599 return false;
2600 }
2601
2602 if (AlwaysPreTouch) {
2603 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2604 }
2605
2606 return true;
2607 }
2608
2609 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2610 shenandoah_assert_heaplocked();
2611
2612 // Bitmaps in special regions do not need uncommits
2613 if (_bitmap_region_special) {
2614 return true;
2615 }
2616
2617 if (is_bitmap_slice_committed(r, true)) {
2618 // Some other region from the group is still committed, meaning the bitmap
2619 // slice should stay committed, exit right away.
2620 return true;
2621 }
2622
2623 // Uncommit the bitmap slice:
2624 size_t slice = r->index() / _bitmap_regions_per_slice;
2625 size_t off = _bitmap_bytes_per_slice * slice;
2626 size_t len = _bitmap_bytes_per_slice;
2627 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2628 return false;
2629 }
2630 return true;
2631 }
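// Note: a worked example of the slice math above. With _bitmap_regions_per_slice == 8,
// region 19 belongs to slice 19 / 8 == 2, which covers regions 16..23 and occupies
// bytes [2 * _bitmap_bytes_per_slice, 3 * _bitmap_bytes_per_slice) of the bitmap
// reservation. The slice is uncommitted only when is_bitmap_slice_committed(r, true)
// finds no other committed region in the group, because all eight regions share the
// same committed range.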
2632
2633 void ShenandoahHeap::forbid_uncommit() {
2634 if (_uncommit_thread != nullptr) {
2635 _uncommit_thread->forbid_uncommit();
2636 }
2637 }
2638
2639 void ShenandoahHeap::allow_uncommit() {
2640 if (_uncommit_thread != nullptr) {
2641 _uncommit_thread->allow_uncommit();
2642 }
2643 }
2644
2645 #ifdef ASSERT
2646 bool ShenandoahHeap::is_uncommit_in_progress() {
2647 if (_uncommit_thread != nullptr) {
2648 return _uncommit_thread->is_uncommit_in_progress();
2649 }
2650 return false;
2651 }
2652 #endif
2653
2654 void ShenandoahHeap::safepoint_synchronize_begin() {
2655 SuspendibleThreadSet::synchronize();
2656 }
2657
2658 void ShenandoahHeap::safepoint_synchronize_end() {
2659 SuspendibleThreadSet::desynchronize();
2660 }
2661
2662 void ShenandoahHeap::try_inject_alloc_failure() {
2663 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2664 _inject_alloc_failure.set();
2665 os::naked_short_sleep(1);
2666 if (cancelled_gc()) {
2667 log_info(gc)("Allocation failure was successfully injected");
2668 }
2669 }
2670 }
2671
2672 bool ShenandoahHeap::should_inject_alloc_failure() {
2673 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2674 }
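// Note: with ShenandoahAllocFailureALot enabled, (os::random() % 1000) > 950 holds for
// the 49 values 951..999, so each eligible call injects a failure with probability of
// about 4.9%. The short sleep in try_inject_alloc_failure() gives another thread time
// to trip over the flag and cancel the GC, which is what the success log line checks.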
2675
2676 void ShenandoahHeap::initialize_serviceability() {
2677 _memory_pool = new ShenandoahMemoryPool(this);
2678 _cycle_memory_manager.add_pool(_memory_pool);
2679 _stw_memory_manager.add_pool(_memory_pool);
2680 }
2681
2682 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2683 GrowableArray<GCMemoryManager*> memory_managers(2);
2684 memory_managers.append(&_cycle_memory_manager);
2685 memory_managers.append(&_stw_memory_manager);
2686 return memory_managers;
2687 }
2688
2689 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2690 GrowableArray<MemoryPool*> memory_pools(1);
2691 memory_pools.append(_memory_pool);
2692 return memory_pools;
2693 }
2694
2695 MemoryUsage ShenandoahHeap::memory_usage() {
2696 return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2697 }
2698
2699 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2700 _heap(ShenandoahHeap::heap()),
2701 _index(0) {}
2702
2703 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2704 _heap(heap),
2705 _index(0) {}
2706
2707 void ShenandoahRegionIterator::reset() {
2708 _index = 0;
2709 }
2710
2711 bool ShenandoahRegionIterator::has_next() const {
2712 return _index < _heap->num_regions();
2713 }
2714
2715 char ShenandoahHeap::gc_state() const {
2716 return _gc_state.raw_value();
2717 }
2718
2719 bool ShenandoahHeap::is_gc_state(GCState state) const {
2720 // If the global gc state has been changed, but hasn't yet been propagated to all threads, then
2721 // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
2722 // _gc_state_changed will be toggled to false and we need to use the thread local state.
2723 return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
2724 }
2725
2727 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2728 #ifdef ASSERT
2729 assert(_liveness_cache != nullptr, "sanity");
2730 assert(worker_id < _max_workers, "sanity");
2731 for (uint i = 0; i < num_regions(); i++) {
2732 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2733 }
2734 #endif
2735 return _liveness_cache[worker_id];
2736 }
2737
2738 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2739 assert(worker_id < _max_workers, "sanity");
2740 assert(_liveness_cache != nullptr, "sanity");
2741 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2742 for (uint i = 0; i < num_regions(); i++) {
2743 ShenandoahLiveData live = ld[i];
2744 if (live > 0) {
2745 ShenandoahHeapRegion* r = get_region(i);
2746 r->increase_live_data_gc_words(live);
2747 ld[i] = 0; // reset the entry so the cache is empty for the next cycle
2748 }
2749 }
2750 }
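// Note: the liveness cache lets each marking worker accumulate per-region live data in
// plain thread-local counters and update the shared region counters only here, once per
// worker per cycle, instead of contending on every marked object. Zeroing each entry
// after flushing restores the empty-cache invariant that get_liveness_cache() asserts.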
2751
2752 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2753 if (is_idle()) return false;
2754
2755 // Objects allocated after marking start are implicitly alive, don't need any barriers during
2756 // marking phase.
2757 if (is_concurrent_mark_in_progress() &&
2758 !marking_context()->allocated_after_mark_start(obj)) {
2759 return true;
2760 }
2761
2762 // Cannot guarantee obj is deeply good.
2763 if (has_forwarded_objects()) {
2764 return true;
2765 }
2766
2767 return false;
2768 }
2769
2770 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2771 if (!mode()->is_generational()) {
2772 return global_generation();
2773 } else if (affiliation == YOUNG_GENERATION) {
2774 return young_generation();
2775 } else if (affiliation == OLD_GENERATION) {
2776 return old_generation();
2777 }
2778
2779 ShouldNotReachHere();
2780 return nullptr;
2781 }
2782
2783 void ShenandoahHeap::log_heap_status(const char* msg) const {
2784 if (mode()->is_generational()) {
2785 young_generation()->log_status(msg);
2786 old_generation()->log_status(msg);
2787 } else {
2788 global_generation()->log_status(msg);
2789 }
2790 }
|