/*
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/vmThread.hpp"

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  // NOTE: the reservation lines below are reconstructed from context; the
  // original excerpt elides them.
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  os::trace_page_sizes_for_requested_size("Heap",
                                          max_byte_size, heap_page_size,
                                          heap_rs.base(),
                                          heap_rs.size(), heap_rs.page_size());

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT ") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

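  // The bitmap is committed and uncommitted in slices. If one bitmap page spans
  // the bitmaps of several regions, a slice covers all regions sharing that page;
  // otherwise each region's bitmap gets its own slice of whole pages.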
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee((_bitmap_bytes_per_slice % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Mark Bitmap",
                                          bitmap_size_orig, bitmap_page_size,
                                          bitmap.base(),
                                          bitmap.size(), bitmap.page_size());
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    os::trace_page_sizes_for_requested_size("Verify Bitmap",
                                            bitmap_size_orig, bitmap_page_size,
                                            verify_bitmap.base(),
                                            verify_bitmap.size(), verify_bitmap.page_size());
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  size_t aux_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
  // NOTE: this block is reconstructed from context (the excerpt elides it):
  // with transparent huge pages there is no reason to use large pages for the
  // short-lived aux bitmap commits.
  if (UseTransparentHugePages) {
    aux_bitmap_page_size = os::vm_page_size();
  }
#endif

  // ... (aux bitmap reservation, region storage reservation, and collection set
  //      sizing elided; the loop below probes candidate base addresses for the
  //      collection set storage) ...

      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
    os::trace_page_sizes_for_requested_size("Collection Set",
                                            cset_size, cset_page_size,
                                            cset_rs.base(),
                                            cset_rs.size(), cset_rs.page_size());
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into huge ones. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // ... (pretouch task execution elided) ...
  }

  // NOTE: the liveness cache allocation below is reconstructed from context;
  // the excerpt elides the array allocation and loop header.
  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _gc_no_progress_count(0),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

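// Worker task that clears the mark bitmap for every region whose bitmap slice
// is currently committed; regions are handed out via the shared iterator.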
class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects()) st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress()) st->print("marking, ");
  if (is_evacuation_in_progress()) st->print("evacuating, ");
  if (is_update_refs_in_progress()) st->print("updating refs, ");
  if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
  if (is_full_gc_in_progress()) st->print("full gc, ");
  if (is_full_gc_move_in_progress()) st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  // ... (collection set address printing elided) ...

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

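// Initializes the GCLAB of a worker thread; applied below to all workers that
// were created before the heap was fully initialized.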
class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, as they cannot determine
  // their max_size yet. Instead, let WorkerThreads initialize the GCLAB whenever a
  // new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
}

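// Usage accounting: _used and _bytes_allocated_since_gc_start are maintained
// with relaxed atomics on hot allocation paths, while _committed changes only
// under the heap lock or at a safepoint (see the asserts below).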
size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

size_t ShenandoahHeap::available() const {
  return free_set()->available();
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease used by more than what is used");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

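// Note: this only tests whether p falls within the reserved heap range; it does
// not imply that the backing memory is committed.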
bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Determine if there is work to do. This avoids taking the heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimizes the amount of work while locks are taken.

  if (committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    static const char* msg = "Concurrent uncommit";
    // ... (uncommit operation elided) ...
  }
}

bool ShenandoahHeap::check_soft_max_changed() {
  // NOTE: the signature and the load of SoftMaxHeapSize are reconstructed from
  // context; the excerpt elides them.
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(min_capacity(), new_soft_max);
    new_soft_max = MIN2(max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on the slow path.
  monitoring_support()->notify_heap_changed();

  // This is called from the allocation path, and thus should be fast.
  _heap_changed.try_set();
}

void ShenandoahHeap::set_forced_counters_update(bool value) {
  monitoring_support()->set_forced_counters_update(value);
}

void ShenandoahHeap::handle_force_counters_update() {
  monitoring_support()->handle_force_counters_update();
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert (size <= actual_size, "allocation should fit");

  // ...and clear or zap the just-allocated GCLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  // NOTE: the signature and the two declarations below are reconstructed from
  // context; the excerpt elides the original lines.
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Check that gc overhead is not exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-tlab) allocations. This check
    // is testing that the GC overhead limit has not been exceeded.
    // This will notify the collector to start a cycle, but will raise
    // an OOME to the mutator if the last Full GCs have not made progress.
    if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
      control_thread()->handle_alloc_failure(req, false);
      return nullptr;
    }

    if (result == nullptr) {
      // Block until the control thread reacted, then retry allocation.
      //
      // It might happen that a thread requesting an allocation unblocks long after
      // the GC happened, only to fail its second allocation, because other threads
      // have already depleted the free storage. In this case, a better strategy is
      // to try again, until at least one full GC has completed.
      //
      // Stop retrying and return nullptr to cause an OutOfMemoryError if our
      // allocation failed even after:
      //   a) We experienced a GC that had good progress, or
      //   b) We experienced at least one Full GC (whether or not it had good progress)
      //
      // TODO: Consider GLOBAL GC rather than Full GC to remediate OOM condition: https://bugs.openjdk.org/browse/JDK-8335910

      size_t original_count = shenandoah_policy()->full_gc_count();
      while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
        control_thread()->handle_alloc_failure(req, true);
        // Retry the allocation after the control thread reacted (reconstructed;
        // the excerpt elides this line).
        result = allocate_memory_under_lock(req, in_new_region);
      }
      if (log_is_enabled(Debug, gc, alloc)) {
        ResourceMark rm;
        log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
                             ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
                             Thread::current()->name(), p2i(result), req.type_string(), req.size(),
                             original_count, get_gc_no_progress_count());
      }
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with mutator allocation, then we may need to block for safepoint.
  // We cannot block for safepoint for GC allocations, because there is a high chance
  // we are already running at safepoint or from stack watermark machinery, and we cannot
  // block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Out of memory
  return nullptr;
}

// ... (the evacuation object closure and the ShenandoahEvacuationTask class
//      header, which declares _sh, _cs and _concurrent, are elided in this
//      excerpt) ...

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evacuation protocol, so it is safe to
    // return the forwarding pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = nullptr;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  ContinuationGCSupport::relativize_stack_chunk(copy_val);

  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation pointer. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

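// Reclaims the full footprint of a dead humongous object, starting from its
// humongous start region.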
void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, an assertion fails when printing a region to the
    // trace log, as it expects that every continuation belongs to a humongous object
    // that starts with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

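// Assertion helper: checks that a thread's GCLAB is initialized and already
// fully retired (no words remaining).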
class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  // ... (rest of tlabs_retire and several functions elided; the lines below
  //      appear to be the tail of a thread-iteration function taking a
  //      ThreadClosure* tcl, such as gc_threads_do) ...

  workers()->threads_do(tcl);
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

// ... (object iteration support and several other functions elided; the lines
//      below close a region-iteration helper that falls back to the serial
//      heap_region_iterate) ...
  } else {
    heap_region_iterate(blk);
  }
}

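// Empty handshake closure: executing it forces every Java thread through a
// handshake poll, which serves as a rendezvous with updated global GC state.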
class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads() {
  ShenandoahRendezvousClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We will recheck these under the
      // pause anyway, to capture any updates that happen in the meantime.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

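// At final mark: counts allocations past TAMS as live, synchronizes each
// region's pinned state with its pin count, and records the update-refs
// watermark for the region.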
class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll the complete bitmap.
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set, make sure it knows about
      // the current pinning status. Also, this allows trashing more regions whose
      // pinning status has just been dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember the limit for updating refs. It is guaranteed that no from-space
      // refs are written above it from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(lock());
    _collection_set->clear();
    heuristics()->choose_collection_set(_collection_set);
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
  // make them parsable for update code to work correctly. Plus, we can compute new sizes
  // for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

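// Java threads read the gc state from a thread-local copy; publish the new
// canonical value at a safepoint, where threads cannot observe it mid-change.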
void ShenandoahHeap::propagate_gc_state_to_java_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    _gc_state_changed = false;
    char state = gc_state();
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
      ShenandoahThreadLocalData::set_gc_state(t, state);
    }
  }
}

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

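// Flips the cancellation flag from CANCELLABLE to CANCELLED atomically.
// Returns true only for the single caller that wins the race.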
bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // ... (remaining shutdown steps elided) ...
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  // ... (body elided, along with nmethod unregistration, object pinning, and
  //      the head of a region loop that syncs each region's pinned state;
  //      the loop fragment continues below) ...

    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
           "Region " SIZE_FORMAT " pinning status is inconsistent", i);
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  // ... (body elided) ...
}

// ... (elided; the lines below close an ASSERT-only worker-count check,
//      apparently assert_gc_workers(uint nworkers)) ...
  } else {
    // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
           ConcGCThreads, nworkers);
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != nullptr, "sanity");
  return _verifier;
}

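// Worker task that updates heap references after evacuation. CONCURRENT selects
// between a suspendible concurrent session and a safepoint-time parallel one.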
template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;
    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions
      // previously reserved to hold the results of evacuation. These reserves are no
      // longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();
      // We cannot transfer any more regions than will be reclaimed when the existing
      // collection set is recycled, because we need the reclaimed collection set
      // regions to replenish the collector reserves.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there is no value in expanding the Mutator free set.

    ShenandoahHeapRegion* r = _regions->next();
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert (update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
      }
      if (ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}

class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that do not have CP marks
    // anymore, as this allows trashing them.

    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }
    }
  }

  bool is_thread_safe() { return true; }
};
2276
2277 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2278 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2279 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2280
2281 {
2282 ShenandoahGCPhase phase(concurrent ?
2283 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2284 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2285 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2286 parallel_heap_region_iterate(&cl);
2287
2288 assert_pinned_region_status();
2289 }
2290
2291 {
2292 ShenandoahGCPhase phase(concurrent ?
2293 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2294 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2295 trash_cset_regions();
2296 }
2297 }
2298
2299 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2300 {
2301 ShenandoahGCPhase phase(concurrent ?
2302 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2303 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2304 ShenandoahHeapLocker locker(lock());
2305 _free_set->rebuild();
2306 }
2307 }
2308
2309 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2310 print_on(st);
2311 st->cr();
2312 print_heap_regions_on(st);
2313 }
2314
2315 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2316 size_t slice = r->index() / _bitmap_regions_per_slice;
2317
2318 size_t regions_from = _bitmap_regions_per_slice * slice;
2319 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2320 for (size_t g = regions_from; g < regions_to; g++) {
2321 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2322 if (skip_self && g == r->index()) continue;
2323 if (get_region(g)->is_committed()) {
2324 return true;
2325 }
2408 void ShenandoahHeap::initialize_serviceability() {
2409 _memory_pool = new ShenandoahMemoryPool(this);
2410 _cycle_memory_manager.add_pool(_memory_pool);
2411 _stw_memory_manager.add_pool(_memory_pool);
2412 }
2413
2414 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2415 GrowableArray<GCMemoryManager*> memory_managers(2);
2416 memory_managers.append(&_cycle_memory_manager);
2417 memory_managers.append(&_stw_memory_manager);
2418 return memory_managers;
2419 }
2420
2421 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2422 GrowableArray<MemoryPool*> memory_pools(1);
2423 memory_pools.append(_memory_pool);
2424 return memory_pools;
2425 }
2426
2427 MemoryUsage ShenandoahHeap::memory_usage() {
2428 return _memory_pool->get_memory_usage();
2429 }
2430
2431 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2432 _heap(ShenandoahHeap::heap()),
2433 _index(0) {}
2434
2435 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2436 _heap(heap),
2437 _index(0) {}
2438
2439 void ShenandoahRegionIterator::reset() {
2440 _index = 0;
2441 }
2442
2443 bool ShenandoahRegionIterator::has_next() const {
2444 return _index < _heap->num_regions();
2445 }
2446
2447 char ShenandoahHeap::gc_state() const {
2448 return _gc_state.raw_value();
2473 }
2474 }
2475
2476 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2477 if (is_idle()) return false;
2478
2479 // Objects allocated after marking start are implicitly alive and don't need any barriers during
2480 // the marking phase.
2481 if (is_concurrent_mark_in_progress() &&
2482 !marking_context()->allocated_after_mark_start(obj)) {
2483 return true;
2484 }
2485
2486 // Cannot guarantee that obj is deeply good while there are still forwarded objects.
2487 if (has_forwarded_objects()) {
2488 return true;
2489 }
2490
2491 return false;
2492 }
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "memory/allocation.hpp"
29 #include "memory/universe.hpp"
30
31 #include "gc/shared/classUnloadingContext.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/locationPrinter.inline.hpp"
36 #include "gc/shared/memAllocator.hpp"
37 #include "gc/shared/plab.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39
40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
49 #include "gc/shenandoah/shenandoahControlThread.hpp"
50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
56 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
57 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
58 #include "gc/shenandoah/shenandoahInitLogger.hpp"
59 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
60 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
61 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
62 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
63 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
65 #include "gc/shenandoah/shenandoahPadding.hpp"
66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
67 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
68 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
69 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
70 #include "gc/shenandoah/shenandoahSTWMark.hpp"
71 #include "gc/shenandoah/shenandoahUtils.hpp"
72 #include "gc/shenandoah/shenandoahVerifier.hpp"
73 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
74 #include "gc/shenandoah/shenandoahVMOperations.hpp"
75 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
76 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
77 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
78 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
79 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
80 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
81 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
82 #include "utilities/globalDefinitions.hpp"
83
84 #if INCLUDE_JFR
85 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
86 #endif
87
88 #include "classfile/systemDictionary.hpp"
89 #include "code/codeCache.hpp"
90 #include "memory/classLoaderMetaspace.hpp"
91 #include "memory/metaspaceUtils.hpp"
92 #include "nmt/mallocTracker.hpp"
93 #include "nmt/memTracker.hpp"
94 #include "oops/compressedOops.inline.hpp"
95 #include "prims/jvmtiTagMap.hpp"
96 #include "runtime/atomic.hpp"
97 #include "runtime/globals.hpp"
98 #include "runtime/interfaceSupport.inline.hpp"
99 #include "runtime/java.hpp"
100 #include "runtime/orderAccess.hpp"
101 #include "runtime/safepointMechanism.hpp"
102 #include "runtime/stackWatermarkSet.hpp"
103 #include "runtime/vmThread.hpp"
156 jint ShenandoahHeap::initialize() {
157 //
158 // Figure out heap sizing
159 //
160
161 size_t init_byte_size = InitialHeapSize;
162 size_t min_byte_size = MinHeapSize;
163 size_t max_byte_size = MaxHeapSize;
164 size_t heap_alignment = HeapAlignment;
165
166 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
167
168 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
169 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
170
171 _num_regions = ShenandoahHeapRegion::region_count();
172 assert(_num_regions == (max_byte_size / reg_size_bytes),
173 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
174 _num_regions, max_byte_size, reg_size_bytes);
175
176 size_t num_committed_regions = init_byte_size / reg_size_bytes;
177 num_committed_regions = MIN2(num_committed_regions, _num_regions);
178 assert(num_committed_regions <= _num_regions, "sanity");
179 _initial_size = num_committed_regions * reg_size_bytes;
180
181 size_t num_min_regions = min_byte_size / reg_size_bytes;
182 num_min_regions = MIN2(num_min_regions, _num_regions);
183 assert(num_min_regions <= _num_regions, "sanity");
184 _minimum_size = num_min_regions * reg_size_bytes;
185
186 // Default to max heap size.
187 _soft_max_size = _num_regions * reg_size_bytes;
188
189 _committed = _initial_size;
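
// Worked example (illustrative numbers only): with -Xms1g -Xmx8g and a
// 4 MB region size, _num_regions = 8 GB / 4 MB = 2048 and
// num_committed_regions = 1 GB / 4 MB = 256, so _initial_size and
// _committed start at 1 GB while _soft_max_size defaults to the full 8 GB.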
190
191 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
192 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
193 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
194
195 //
209 heap_rs.size(), heap_rs.page_size());
210
211 #if SHENANDOAH_OPTIMIZED_MARKTASK
212 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
213 // Fail if we ever attempt to address more than we can.
214 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
215 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
216 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
217 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
218 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
219 vm_exit_during_initialization("Fatal Error", buf);
220 }
221 #endif
222
223 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
224 if (!_heap_region_special) {
225 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
226 "Cannot commit heap memory");
227 }
228
229 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
230
231   // Now that we know the number of regions and heap sizes, initialize the heuristics.
232 initialize_heuristics();
233
234 assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
235
236 //
237 // Worker threads must be initialized after the barrier is configured
238 //
239 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
240 if (_workers == nullptr) {
241 vm_exit_during_initialization("Failed necessary allocation.");
242 } else {
243 _workers->initialize_workers();
244 }
245
246 if (ParallelGCThreads > 1) {
247 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
248 _safepoint_workers->initialize_workers();
249 }
250
251 //
252 // Reserve and commit memory for bitmap(s)
253 //
254
255 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
256 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
257
258 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
259
260 guarantee(bitmap_bytes_per_region != 0,
261 "Bitmap bytes per region should not be zero");
262 guarantee(is_power_of_2(bitmap_bytes_per_region),
263 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
264
265 if (bitmap_page_size > bitmap_bytes_per_region) {
266 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
267 _bitmap_bytes_per_slice = bitmap_page_size;
268 } else {
269 _bitmap_regions_per_slice = 1;
270 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
271 }
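
// For illustration (assuming a heap_map_factor of 64, i.e. one bitmap byte
// covers 64 heap bytes): 32 MB regions need 512 KB of bitmap each. With 2 MB
// large pages, bitmap_page_size (2 MB) > 512 KB, so one bitmap page covers a
// slice of 4 regions and commit/uncommit must happen in whole 2 MB slices;
// with 4 KB base pages, each region gets its own slice. This is why
// is_bitmap_slice_committed() has to consult the neighboring regions of a slice.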
272
273 guarantee(_bitmap_regions_per_slice >= 1,
274 "Should have at least one region per slice: " SIZE_FORMAT,
275 _bitmap_regions_per_slice);
276
277 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
278 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
279 _bitmap_bytes_per_slice, bitmap_page_size);
280
281 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
282 os::trace_page_sizes_for_requested_size("Mark Bitmap",
283 bitmap_size_orig, bitmap_page_size,
284 bitmap.base(),
285 bitmap.size(), bitmap.page_size());
286 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
287 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
288 _bitmap_region_special = bitmap.special();
289
290 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
291 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
292 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
293 if (!_bitmap_region_special) {
294 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
295 "Cannot commit bitmap memory");
296 }
297
298 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
299
300 if (ShenandoahVerify) {
301 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
302 os::trace_page_sizes_for_requested_size("Verify Bitmap",
303 bitmap_size_orig, bitmap_page_size,
304 verify_bitmap.base(),
305 verify_bitmap.size(), verify_bitmap.page_size());
306 if (!verify_bitmap.special()) {
307 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
308 "Cannot commit verification bitmap memory");
309 }
310 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
311 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
312 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
313 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
314 }
315
316 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
317 size_t aux_bitmap_page_size = bitmap_page_size;
318 #ifdef LINUX
369 assert(is_aligned(req_addr, cset_align), "Should be aligned");
370 cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
371 if (cset_rs.is_reserved()) {
372 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
373 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
374 break;
375 }
376 }
377
378 if (_collection_set == nullptr) {
379 cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
380 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
381 }
382 os::trace_page_sizes_for_requested_size("Collection Set",
383 cset_size, cset_page_size,
384 cset_rs.base(),
385 cset_rs.size(), cset_rs.page_size());
386 }
387
388 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
389 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
390 _free_set = new ShenandoahFreeSet(this, _num_regions);
391
392 {
393 ShenandoahHeapLocker locker(lock());
394
395
396 for (size_t i = 0; i < _num_regions; i++) {
397 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
398 bool is_committed = i < num_committed_regions;
399 void* loc = region_storage.base() + i * region_align;
400
401 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
402 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
403
404 _marking_context->initialize_top_at_mark_start(r);
405 _regions[i] = r;
406 assert(!collection_set()->is_in(i), "New region should not be in collection set");
407
408 _affiliations[i] = ShenandoahAffiliation::FREE;
409 }
410
411 // Initialize to complete
412 _marking_context->mark_complete();
413 size_t young_cset_regions, old_cset_regions;
414
415 // We are initializing free set. We ignore cset region tallies.
416 size_t first_old, last_old, num_old;
417 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
418 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
419 }
420
421 if (AlwaysPreTouch) {
422 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
423   // before initialize() below zeroes it with the initializing thread. For any given region,
424 // we touch the region and the corresponding bitmaps from the same thread.
425 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
426
427 _pretouch_heap_page_size = heap_page_size;
428 _pretouch_bitmap_page_size = bitmap_page_size;
429
430 #ifdef LINUX
431 // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
432   // pages. But the kernel needs to know that every small page is used in order to coalesce
433   // them into a huge one. Therefore, we need to pretouch with smaller pages.
434 if (UseTransparentHugePages) {
435 _pretouch_heap_page_size = (size_t)os::vm_page_size();
436 _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
437 }
438 #endif
457 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
458 }
459
460 // There should probably be Shenandoah-specific options for these,
461 // just as there are G1-specific options.
462 {
463 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
464 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
465 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
466 }
467
468 _monitoring_support = new ShenandoahMonitoringSupport(this);
469 _phase_timings = new ShenandoahPhaseTimings(max_workers());
470 ShenandoahCodeRoots::initialize();
471
472 if (ShenandoahPacing) {
473 _pacer = new ShenandoahPacer(this);
474 _pacer->setup_for_idle();
475 }
476
477 initialize_controller();
478
479 print_init_logger();
480
481 return JNI_OK;
482 }
483
484 void ShenandoahHeap::initialize_controller() {
485 _control_thread = new ShenandoahControlThread();
486 }
487
488 void ShenandoahHeap::print_init_logger() const {
489 ShenandoahInitLogger::print();
490 }
491
492 void ShenandoahHeap::initialize_mode() {
493 if (ShenandoahGCMode != nullptr) {
494 if (strcmp(ShenandoahGCMode, "satb") == 0) {
495 _gc_mode = new ShenandoahSATBMode();
496 } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
497 _gc_mode = new ShenandoahIUMode();
498 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
499 _gc_mode = new ShenandoahPassiveMode();
500 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
501 _gc_mode = new ShenandoahGenerationalMode();
502 } else {
503 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
504 }
505 } else {
506 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
507 }
508 _gc_mode->initialize_flags();
509 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
510 vm_exit_during_initialization(
511 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
512 _gc_mode->name()));
513 }
514 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
515 vm_exit_during_initialization(
516 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
517 _gc_mode->name()));
518 }
519 }
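
// For example (hypothetical command lines, assuming the usual flag gating):
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb ...
//   java -XX:+UseShenandoahGC -XX:+UnlockExperimentalVMOptions \
//        -XX:ShenandoahGCMode=generational ...
// A diagnostic mode such as "passive" would additionally require
// -XX:+UnlockDiagnosticVMOptions, as enforced above.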
520
521 void ShenandoahHeap::initialize_heuristics() {
522 _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
523 _global_generation->initialize_heuristics(mode());
524 _evac_tracker = new ShenandoahEvacuationTracker(mode()->is_generational());
525 }
526
527 #ifdef _MSC_VER
528 #pragma warning( push )
529 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
530 #endif
531
532 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
533 CollectedHeap(),
534 _gc_generation(nullptr),
535 _active_generation(nullptr),
536 _initial_size(0),
537 _committed(0),
538 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
539 _workers(nullptr),
540 _safepoint_workers(nullptr),
541 _heap_region_special(false),
542 _num_regions(0),
543 _regions(nullptr),
544 _affiliations(nullptr),
545 _gc_state_changed(false),
546 _gc_no_progress_count(0),
547 _cancel_requested_time(0),
548 _update_refs_iterator(this),
549 _global_generation(nullptr),
550 _control_thread(nullptr),
551 _young_generation(nullptr),
552 _old_generation(nullptr),
553 _shenandoah_policy(policy),
554 _gc_mode(nullptr),
555 _free_set(nullptr),
556 _pacer(nullptr),
557 _verifier(nullptr),
558 _phase_timings(nullptr),
559 _evac_tracker(nullptr),
560 _mmu_tracker(),
561 _monitoring_support(nullptr),
562 _memory_pool(nullptr),
563 _stw_memory_manager("Shenandoah Pauses"),
564 _cycle_memory_manager("Shenandoah Cycles"),
565 _gc_timer(new ConcurrentGCTimer()),
566 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
567 _marking_context(nullptr),
568 _bitmap_size(0),
569 _bitmap_regions_per_slice(0),
570 _bitmap_bytes_per_slice(0),
571 _bitmap_region_special(false),
572 _aux_bitmap_region_special(false),
573 _liveness_cache(nullptr),
574 _collection_set(nullptr)
575 {
576   // Initialize the GC mode early; many subsequent initialization procedures depend on it.
577 initialize_mode();
578 }
579
580 #ifdef _MSC_VER
581 #pragma warning( pop )
582 #endif
583
584 void ShenandoahHeap::print_on(outputStream* st) const {
585 st->print_cr("Shenandoah Heap");
586 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
587 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
588 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
589 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
590 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
591 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
592 num_regions(),
593 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
594 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
595
596 st->print("Status: ");
597 if (has_forwarded_objects()) st->print("has forwarded objects, ");
598 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
599 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
600 if (is_evacuation_in_progress()) st->print("evacuating, ");
601 if (is_update_refs_in_progress()) st->print("updating refs, ");
602 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
603 if (is_full_gc_in_progress()) st->print("full gc, ");
604 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
605 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
606 if (is_concurrent_strong_root_in_progress() &&
607 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
608
609 if (cancelled_gc()) {
610 st->print("cancelled");
611 } else {
612 st->print("not cancelled");
613 }
614 st->cr();
615
616 st->print_cr("Reserved region:");
617 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
618 p2i(reserved_region().start()),
619 p2i(reserved_region().end()));
630 st->cr();
631 MetaspaceUtils::print_on(st);
632
633 if (Verbose) {
634 st->cr();
635 print_heap_regions_on(st);
636 }
637 }
638
639 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
640 public:
641 void do_thread(Thread* thread) {
642 assert(thread != nullptr, "Sanity");
643 assert(thread->is_Worker_thread(), "Only worker thread expected");
644 ShenandoahThreadLocalData::initialize_gclab(thread);
645 }
646 };
647
648 void ShenandoahHeap::post_initialize() {
649 CollectedHeap::post_initialize();
650 _mmu_tracker.initialize();
651
652 MutexLocker ml(Threads_lock);
653
654 ShenandoahInitWorkerGCLABClosure init_gclabs;
655 _workers->threads_do(&init_gclabs);
656
657   // gclab cannot be initialized early during VM startup, because its max_size cannot be determined yet.
658   // Instead, let WorkerThreads initialize the gclab when a new worker is created.
659 _workers->set_initialize_gclab();
660 if (_safepoint_workers != nullptr) {
661 _safepoint_workers->threads_do(&init_gclabs);
662 _safepoint_workers->set_initialize_gclab();
663 }
664
665 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
666 }
667
668 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
669 return _global_generation->heuristics();
670 }
671
672 size_t ShenandoahHeap::used() const {
673 return global_generation()->used();
674 }
675
676 size_t ShenandoahHeap::committed() const {
677 return Atomic::load(&_committed);
678 }
679
680 void ShenandoahHeap::increase_committed(size_t bytes) {
681 shenandoah_assert_heaplocked_or_safepoint();
682 _committed += bytes;
683 }
684
685 void ShenandoahHeap::decrease_committed(size_t bytes) {
686 shenandoah_assert_heaplocked_or_safepoint();
687 _committed -= bytes;
688 }
689
690 // For tracking usage based on allocations, it should be the case that:
691 // * The sum of regions::used == heap::used
692 // * The sum of a generation's regions::used == generation::used
693 // * The sum of a generation's humongous regions::free == generation::humongous_waste
694 // These invariants are checked by the verifier on GC safepoints.
695 //
696 // Additional notes:
697 // * When a mutator's allocation request causes a region to be retired, the
698 // free memory left in that region is considered waste. It does not contribute
699 // to the usage, but it _does_ contribute to allocation rate.
700 // * The bottom of a PLAB must be aligned on card size. In some cases this will
701 // require padding in front of the PLAB (a filler object). Because this padding
702 // is included in the region's used memory we include the padding in the usage
703 // accounting as waste.
704 // * Mutator allocations are used to compute an allocation rate. They are also
705 // sent to the Pacer for those purposes.
706 // * There are three sources of waste:
707 // 1. The padding used to align a PLAB on card size
708 // 2. Region's free is less than minimum TLAB size and is retired
709 // 3. The unused portion of memory in the last region of a humongous object
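//
// A small worked example (hypothetical sizes): a mutator allocation with
// actual_size() == 128 words and waste() == 16 words of padding adds
// 144 words to the generation's allocation counter (feeding the allocation
// rate and the Pacer), but only 128 words to usage; the 16 words count as
// humongous waste only when the request exceeds the humongous threshold,
// as handled below.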
710 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
711 size_t actual_bytes = req.actual_size() * HeapWordSize;
712 size_t wasted_bytes = req.waste() * HeapWordSize;
713 ShenandoahGeneration* generation = generation_for(req.affiliation());
714
715 if (req.is_gc_alloc()) {
716 assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
717 increase_used(generation, actual_bytes + wasted_bytes);
718 } else {
719 assert(req.is_mutator_alloc(), "Expected mutator alloc here");
720 // padding and actual size both count towards allocation counter
721 generation->increase_allocated(actual_bytes + wasted_bytes);
722
723 // only actual size counts toward usage for mutator allocations
724 increase_used(generation, actual_bytes);
725
726 // notify pacer of both actual size and waste
727 notify_mutator_alloc_words(req.actual_size(), req.waste());
728
729 if (wasted_bytes > 0 && req.actual_size() > ShenandoahHeapRegion::humongous_threshold_words()) {
730       increase_humongous_waste(generation, wasted_bytes);
731 }
732 }
733 }
734
735 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
736 generation->increase_humongous_waste(bytes);
737 if (!generation->is_global()) {
738 global_generation()->increase_humongous_waste(bytes);
739 }
740 }
741
742 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
743 generation->decrease_humongous_waste(bytes);
744 if (!generation->is_global()) {
745 global_generation()->decrease_humongous_waste(bytes);
746 }
747 }
748
749 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
750 generation->increase_used(bytes);
751 if (!generation->is_global()) {
752 global_generation()->increase_used(bytes);
753 }
754 }
755
756 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
757 generation->decrease_used(bytes);
758 if (!generation->is_global()) {
759 global_generation()->decrease_used(bytes);
760 }
761 }
762
763 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
764 if (ShenandoahPacing) {
765 control_thread()->pacing_notify_alloc(words);
766 if (waste > 0) {
767 pacer()->claim_for_alloc(waste, true);
768 }
769 }
770 }
771
772 size_t ShenandoahHeap::capacity() const {
773 return committed();
774 }
775
776 size_t ShenandoahHeap::max_capacity() const {
777 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
778 }
779
780 size_t ShenandoahHeap::soft_max_capacity() const {
781 size_t v = Atomic::load(&_soft_max_size);
782 assert(min_capacity() <= v && v <= max_capacity(),
783 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
784 min_capacity(), v, max_capacity());
785 return v;
786 }
787
788 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
789 assert(min_capacity() <= v && v <= max_capacity(),
790 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
791 min_capacity(), v, max_capacity());
792 Atomic::store(&_soft_max_size, v);
793 }
794
795 size_t ShenandoahHeap::min_capacity() const {
796 return _minimum_size;
797 }
798
799 size_t ShenandoahHeap::initial_capacity() const {
800 return _initial_size;
801 }
802
803 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
804 assert (ShenandoahUncommit, "should be enabled");
805
806   // Determine if there is work to do. This avoids taking the heap lock if there is
807   // no work available, avoids spamming logs with superfluous messages,
808   // and minimizes the amount of work done while locks are taken.
809
810 if (committed() <= shrink_until) return;
811
812 bool has_work = false;
813 for (size_t i = 0; i < num_regions(); i++) {
814 ShenandoahHeapRegion* r = get_region(i);
815 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
816 has_work = true;
817 break;
818 }
819 }
820
821 if (has_work) {
822 static const char* msg = "Concurrent uncommit";
862 size_t old_soft_max = soft_max_capacity();
863 if (new_soft_max != old_soft_max) {
864 new_soft_max = MAX2(min_capacity(), new_soft_max);
865 new_soft_max = MIN2(max_capacity(), new_soft_max);
866 if (new_soft_max != old_soft_max) {
867 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
868 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
869 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
870 );
871 set_soft_max_capacity(new_soft_max);
872 return true;
873 }
874 }
875 return false;
876 }
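
// Illustrative usage (assuming new_soft_max is driven by the manageable
// SoftMaxHeapSize flag): the soft max can be changed at runtime, e.g. with
// "jcmd <pid> VM.set_flag SoftMaxHeapSize 2g"; the clamping above keeps any
// such value within [min_capacity(), max_capacity()].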
877
878 void ShenandoahHeap::notify_heap_changed() {
879   // Update monitoring counters when we take a new region. This amortizes the
880   // update costs on the slow path.
881 monitoring_support()->notify_heap_changed();
882 _heap_changed.set();
883 }
884
885 void ShenandoahHeap::set_forced_counters_update(bool value) {
886 monitoring_support()->set_forced_counters_update(value);
887 }
888
889 void ShenandoahHeap::handle_force_counters_update() {
890 monitoring_support()->handle_force_counters_update();
891 }
892
893 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
894 // New object should fit the GCLAB size
895 size_t min_size = MAX2(size, PLAB::min_size());
896
897 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
898 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
899
900 new_size = MIN2(new_size, PLAB::max_size());
901 new_size = MAX2(new_size, PLAB::min_size());
902
903   // Record the new heuristic value even if we take a shortcut below. This captures
904   // the case when moderately-sized objects always take the shortcut. At some point,
905   // the heuristics should catch up with them.
906 log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
907 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
908
909 if (new_size < size) {
910 // New size still does not fit the object. Fall back to shared allocation.
911     // This avoids retiring perfectly good GCLABs when we encounter a large object.
912 log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
913 return nullptr;
914 }
915
916 // Retire current GCLAB, and allocate a new one.
917 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
918 gclab->retire();
919
920 size_t actual_size = 0;
921 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
922 if (gclab_buf == nullptr) {
923 return nullptr;
924 }
925
926 assert (size <= actual_size, "allocation should fit");
927
928   // ...and clear or zap the just-allocated GCLAB, if needed.
929 if (ZeroTLAB) {
930 Copy::zero_to_words(gclab_buf, actual_size);
931 } else if (ZapTLAB) {
932 // Skip mangling the space corresponding to the object header to
933 // ensure that the returned space is not considered parsable by
934 // any concurrent GC thread.
935 size_t hdr_size = oopDesc::header_size();
936 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
937 }
938 gclab->set_buf(gclab_buf, actual_size);
939 return gclab->allocate(size);
940 }
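
// Sizing example (illustrative numbers): with a current GCLAB heuristic of
// 4K words, a 1K-word miss doubles new_size to 8K words (clamped to
// [PLAB::min_size(), PLAB::max_size()]), retires the old GCLAB, and carves
// the object out of a fresh one. A 100K-word object instead hits the
// new_size < size shortcut and falls back to a shared allocation, leaving
// the current GCLAB intact.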
941
942 // Called from stubs in JIT code or interpreter
943 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
944 size_t requested_size,
945 size_t* actual_size) {
946 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
947 HeapWord* res = allocate_memory(req);
948 if (res != nullptr) {
949 *actual_size = req.actual_size();
950 } else {
951 *actual_size = 0;
952 }
953 return res;
954 }
955
956 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
957 size_t word_size,
958 size_t* actual_size) {
959 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
960 HeapWord* res = allocate_memory(req);
961 if (res != nullptr) {
962 *actual_size = req.actual_size();
971 bool in_new_region = false;
972 HeapWord* result = nullptr;
973
974 if (req.is_mutator_alloc()) {
975 if (ShenandoahPacing) {
976 pacer()->pace_for_alloc(req.size());
977 pacer_epoch = pacer()->epoch();
978 }
979
980 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
981 result = allocate_memory_under_lock(req, in_new_region);
982 }
983
984     // Check that the GC overhead limit is not exceeded.
985     //
986     // Shenandoah will grind along for quite a while allocating one
987     // object at a time using shared (non-TLAB) allocations. This check
988     // notifies the collector to start a cycle, but raises an OOME to the
989     // mutator if the most recent Full GCs have made no progress.
990     // gc_no_progress_count is incremented after each degenerated or Full GC
991     // that fails to achieve is_good_progress().
992 if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
993 control_thread()->handle_alloc_failure(req, false);
994 req.set_actual_size(0);
995 return nullptr;
996 }
997
998 if (result == nullptr) {
999 // Block until control thread reacted, then retry allocation.
1000 //
1001 // It might happen that one of the threads requesting allocation would unblock
1002 // way later after GC happened, only to fail the second allocation, because
1003 // other threads have already depleted the free storage. In this case, a better
1004 // strategy is to try again, until at least one full GC has completed.
1005 //
1006       // Stop retrying and return nullptr, triggering an OutOfMemoryError, if our allocation failed even after:
1007 // a) We experienced a GC that had good progress, or
1008 // b) We experienced at least one Full GC (whether or not it had good progress)
1009 //
1010 // TODO: Consider GLOBAL GC rather than Full GC to remediate OOM condition: https://bugs.openjdk.org/browse/JDK-8335910
1011
1012 size_t original_count = shenandoah_policy()->full_gc_count();
1013 while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
1014 control_thread()->handle_alloc_failure(req, true);
1020 }
1021 if (log_is_enabled(Debug, gc, alloc)) {
1022 ResourceMark rm;
1023 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
1024 ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1025 Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1026 original_count, get_gc_no_progress_count());
1027 }
1028 }
1029 } else {
1030 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1031 result = allocate_memory_under_lock(req, in_new_region);
1032 // Do not call handle_alloc_failure() here, because we cannot block.
1033 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1034 }
1035
1036 if (in_new_region) {
1037 notify_heap_changed();
1038 }
1039
1040 if (result == nullptr) {
1041 req.set_actual_size(0);
1042 }
1043
1044 // This is called regardless of the outcome of the allocation to account
1045 // for any waste created by retiring regions with this request.
1046 increase_used(req);
1047
1048 if (result != nullptr) {
1049 size_t requested = req.size();
1050 size_t actual = req.actual_size();
1051
1052 assert (req.is_lab_alloc() || (requested == actual),
1053 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1054 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1055
1056 if (req.is_mutator_alloc()) {
1057 // If we requested more than we were granted, give the rest back to pacer.
1058 // This only matters if we are in the same pacing epoch: do not try to unpace
1059 // over the budget for the other phase.
1060 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1061 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1062 }
1063 }
1064 }
1065
1066 return result;
1067 }
1068
1069 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1070   // If we are dealing with a mutator allocation, then we may need to block for a safepoint.
1071   // We cannot block for a safepoint for GC allocations, because there is a high chance
1072   // we are already running at a safepoint or from the stack watermark machinery, and we cannot
1073   // block again.
1074 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1075
1076 // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1077 if (req.is_old() && !old_generation()->can_allocate(req)) {
1078 return nullptr;
1079 }
1080
1081   // If the TLAB request size is greater than the memory available, allocate() will attempt to downsize the request
1082   // to fit within the available memory.
1083 HeapWord* result = _free_set->allocate(req, in_new_region);
1084
1085 // Record the plab configuration for this result and register the object.
1086 if (result != nullptr && req.is_old()) {
1087 old_generation()->configure_plab_for_current_thread(req);
1088 if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1089 // Register the newly allocated object while we're holding the global lock since there's no synchronization
1090 // built in to the implementation of register_object(). There are potential races when multiple independent
1091 // threads are allocating objects, some of which might span the same card region. For example, consider
1092 // a card table's memory region within which three objects are being allocated by three different threads:
1093 //
1094 // objects being "concurrently" allocated:
1095 // [-----a------][-----b-----][--------------c------------------]
1096 // [---- card table memory range --------------]
1097 //
1098 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1099 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1100 // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1101 // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1102 // card region.
1103 //
1104 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1105 // last-start representing object b while first-start represents object c. This is why we need to require all
1106 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1107 old_generation()->card_scan()->register_object(result);
1108 }
1109 }
1110
1111 return result;
1112 }
1113
1114 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1115 bool* gc_overhead_limit_was_exceeded) {
1116 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1117 return allocate_memory(req);
1118 }
1119
1120 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1121 size_t size,
1122 Metaspace::MetadataType mdtype) {
1123 MetaWord* result;
1124
1125 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1126 ShenandoahHeuristics* h = global_generation()->heuristics();
1127 if (h->can_unload_classes()) {
1128 h->record_metaspace_oom();
1129 }
1130
1131 // Expand and retry allocation
1132 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1133 if (result != nullptr) {
1134 return result;
1135 }
1136
1137 // Start full GC
1138 collect(GCCause::_metadata_GC_clear_soft_refs);
1139
1140 // Retry allocation
1141 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1142 if (result != nullptr) {
1143 return result;
1144 }
1145
1146 // Expand and retry allocation
1147 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1200 private:
1201 void do_work() {
1202 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1203 ShenandoahHeapRegion* r;
1204     while ((r = _cs->claim_next()) != nullptr) {
1205 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1206 _sh->marked_object_iterate(r, &cl);
1207
1208 if (ShenandoahPacing) {
1209 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1210 }
1211
1212 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1213 break;
1214 }
1215 }
1216 }
1217 };
1218
1219 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1220 if (mode()->is_generational()) {
1221 ShenandoahRegionIterator regions;
1222     ShenandoahGenerationalEvacuationTask task(ShenandoahGenerationalHeap::heap(), &regions, concurrent);
1223 workers()->run_task(&task);
1224 } else {
1225 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1226 workers()->run_task(&task);
1227 }
1228 }
1229
1230 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1231 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1232 if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1233     // This thread went through the OOM-during-evac protocol. It is safe to return
1234     // the forwarding pointer. It must not attempt to evacuate any other objects.
1235 return ShenandoahBarrierSet::resolve_forwarded(p);
1236 }
1237
1238 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1239
1240 ShenandoahHeapRegion* r = heap_region_containing(p);
1241 assert(!r->is_humongous(), "never evacuate humongous objects");
1242
1243 ShenandoahAffiliation target_gen = r->affiliation();
1244 return try_evacuate_object(p, thread, r, target_gen);
1245 }
1246
1247 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1248 ShenandoahAffiliation target_gen) {
1249 assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1250 assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1251 bool alloc_from_lab = true;
1252 HeapWord* copy = nullptr;
1253 size_t size = p->size();
1254
1255 #ifdef ASSERT
1256 if (ShenandoahOOMDuringEvacALot &&
1257 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1258 copy = nullptr;
1259 } else {
1260 #endif
1261 if (UseTLAB) {
1262 copy = allocate_from_gclab(thread, size);
1263 if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
1264       // GCLAB allocation failed because we are bumping up against the limit on the young evacuation reserve. Try resetting
1265       // the desired GCLAB size and retry the GCLAB allocation to avoid a cascade of shared memory allocations.
1266 // TODO: is this right? using PLAB::min_size() here for gc lab size?
1267 ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
1268 copy = allocate_from_gclab(thread, size);
1269 // If we still get nullptr, we'll try a shared allocation below.
1270 }
1271 }
1272
1273 if (copy == nullptr) {
1274 // If we failed to allocate in LAB, we'll try a shared allocation.
1275 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1276 copy = allocate_memory(req);
1277 alloc_from_lab = false;
1278 }
1279 #ifdef ASSERT
1280 }
1281 #endif
1282
1283 if (copy == nullptr) {
1284 control_thread()->handle_alloc_failure_evac(size);
1285
1286 _oom_evac_handler.handle_out_of_memory_during_evacuation();
1287
1288 return ShenandoahBarrierSet::resolve_forwarded(p);
1289 }
1290
1291 // Copy the object:
1292 _evac_tracker->begin_evacuation(thread, size * HeapWordSize);
1293 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1294
1295 oop copy_val = cast_to_oop(copy);
1296
1297 // Try to install the new forwarding pointer.
1298 ContinuationGCSupport::relativize_stack_chunk(copy_val);
1299
1300 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1301 if (result == copy_val) {
1302 // Successfully evacuated. Our copy is now the public one!
1303 _evac_tracker->end_evacuation(thread, size * HeapWordSize);
1304 shenandoah_assert_correct(nullptr, copy_val);
1305 return copy_val;
1306 } else {
1307 // Failed to evacuate. We need to deal with the object that is left behind. Since this
1308 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1309 // But if it happens to contain references to evacuated regions, those references would
1310 // not get updated for this stale copy during this cycle, and we will crash while scanning
1311 // it the next cycle.
1312 if (alloc_from_lab) {
1313 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1314 // object will overwrite this stale copy, or the filler object on LAB retirement will
1315 // do this.
1316 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1317 } else {
1318 // For non-LAB allocations, we have no way to retract the allocation, and
1319 // have to explicitly overwrite the copy with the filler object. With that overwrite,
1320 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1321 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1322 fill_with_object(copy, size);
1323 shenandoah_assert_correct(nullptr, copy_val);
1324 // For non-LAB allocations, the object has already been registered
1325 }
1326 shenandoah_assert_correct(nullptr, result);
1327 return result;
1328 }
1329 }
1330
1331 void ShenandoahHeap::trash_cset_regions() {
1332 ShenandoahHeapLocker locker(lock());
1333
1334 ShenandoahCollectionSet* set = collection_set();
1335 ShenandoahHeapRegion* r;
1336 set->clear_current_index();
1337 while ((r = set->next()) != nullptr) {
1338 r->make_trash();
1339 }
1340 collection_set()->clear();
1341 }
1342
1343 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1344 st->print_cr("Heap Regions:");
1345 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1346 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1347 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1348 st->print_cr("UWM=update watermark, U=used");
1349 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1350 st->print_cr("S=shared allocs, L=live data");
1351 st->print_cr("CP=critical pins");
1352
1353 for (size_t i = 0; i < num_regions(); i++) {
1354 get_region(i)->print_on(st);
1355 }
1356 }
1357
1358 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1359 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1360
1361 oop humongous_obj = cast_to_oop(start->bottom());
1362 size_t size = humongous_obj->size();
1363 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1364 size_t index = start->index() + required_regions - 1;
1365
1366 assert(!start->has_live(), "liveness must be zero");
1367
1368   for (size_t i = 0; i < required_regions; i++) {
1369     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1370     // as printing expects that every region belongs to a humongous sequence starting with a humongous start region.
1371     ShenandoahHeapRegion* region = get_region(index--);
1372
1373 assert(region->is_humongous(), "expect correct humongous start or continuation");
1374 assert(!region->is_cset(), "Humongous region should not be in collection set");
1375
1376 region->make_trash_immediate();
1377 }
1378 return required_regions;
1379 }
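
// E.g. (illustrative): with 2 MB regions, a 5 MB humongous object has
// required_regions == 3, and the loop above trashes indices start+2,
// start+1, start+0 in that order, preserving the tail-first invariant
// described in the comment.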
1380
1381 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1382 public:
1383 ShenandoahCheckCleanGCLABClosure() {}
1384 void do_thread(Thread* thread) {
1385 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1386 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1387 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1388
1389 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1390 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1391 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1392 assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1393 }
1394 }
1395 };
1396
1397 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1398 private:
1399 bool const _resize;
1400 public:
1401 ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1402 void do_thread(Thread* thread) {
1403 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1404 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1405 gclab->retire();
1406 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1407 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1408 }
1409
1410 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1411 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1412 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1413
1414       // There are two reasons to retire all PLABs between old-gen evacuation passes:
1415       // 1. We need to make the PLAB memory parsable by remembered-set scanning.
1416       // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1417 ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1418 if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1419 ShenandoahThreadLocalData::set_plab_size(thread, 0);
1420 }
1421 }
1422 }
1423 };
1424
1425 void ShenandoahHeap::labs_make_parsable() {
1426 assert(UseTLAB, "Only call with UseTLAB");
1427
1428 ShenandoahRetireGCLABClosure cl(false);
1429
1430 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1431 ThreadLocalAllocBuffer& tlab = t->tlab();
1432 tlab.make_parsable();
1433 cl.do_thread(t);
1434 }
1435
1436 workers()->threads_do(&cl);
1437 }
1438
1439 void ShenandoahHeap::tlabs_retire(bool resize) {
1440 assert(UseTLAB, "Only call with UseTLAB");
1441 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1530
1531 workers()->threads_do(tcl);
1532 if (_safepoint_workers != nullptr) {
1533 _safepoint_workers->threads_do(tcl);
1534 }
1535 }
1536
1537 void ShenandoahHeap::print_tracing_info() const {
1538 LogTarget(Info, gc, stats) lt;
1539 if (lt.is_enabled()) {
1540 ResourceMark rm;
1541 LogStream ls(lt);
1542
1543 phase_timings()->print_global_on(&ls);
1544
1545 ls.cr();
1546 ls.cr();
1547
1548 shenandoah_policy()->print_gc_stats(&ls);
1549
1550 ls.cr();
1551
1552 evac_tracker()->print_global_on(&ls);
1553
1554 ls.cr();
1555 ls.cr();
1556 }
1557 }
1558
1559 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1560 shenandoah_assert_control_or_vm_thread_at_safepoint();
1561 _gc_generation = generation;
1562 }
1563
1564 // Active generation may only be set by the VM thread at a safepoint.
1565 void ShenandoahHeap::set_active_generation() {
1566 assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1567 assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1568 assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1569 _active_generation = _gc_generation;
1570 }
1571
1572 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1573 shenandoah_policy()->record_collection_cause(cause);
1574
1575 assert(gc_cause() == GCCause::_no_gc, "Over-writing cause");
1576 assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1577
1578 set_gc_cause(cause);
1579 set_gc_generation(generation);
1580
1581 generation->heuristics()->record_cycle_start();
1582 }
1583
1584 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1585 assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1586 assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1587
1588 generation->heuristics()->record_cycle_end();
1589 if (mode()->is_generational() && generation->is_global()) {
1590     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well.
1591 young_generation()->heuristics()->record_cycle_end();
1592 old_generation()->heuristics()->record_cycle_end();
1593 }
1594
1595 set_gc_generation(nullptr);
1596 set_gc_cause(GCCause::_no_gc);
1597 }
1598
1599 void ShenandoahHeap::verify(VerifyOption vo) {
1600 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1601 if (ShenandoahVerify) {
1602 verifier()->verify_generic(vo);
1603 } else {
1604 // TODO: Consider allocating verification bitmaps on demand,
1605 // and turn this on unconditionally.
1606 }
1607 }
1608 }
1609 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1610 return _free_set->capacity();
1611 }
1612
1613 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1614 private:
1615 MarkBitMap* _bitmap;
1616 ShenandoahScanObjectStack* _oop_stack;
1617 ShenandoahHeap* const _heap;
1618 ShenandoahMarkingContext* const _marking_context;
1915 } else {
1916 heap_region_iterate(blk);
1917 }
1918 }
1919
1920 class ShenandoahRendezvousClosure : public HandshakeClosure {
1921 public:
1922 inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1923 inline void do_thread(Thread* thread) {}
1924 };
1925
1926 void ShenandoahHeap::rendezvous_threads() {
1927 ShenandoahRendezvousClosure cl;
1928 Handshake::execute(&cl);
1929 }
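
// Note (editorial gloss on the mechanism above): the closure body is
// intentionally empty. Executing a handshake forces every Java thread
// through a poll, so by the time Handshake::execute() returns, all threads
// can be assumed to run against previously published global state, without
// stopping the world.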
1930
void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

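// Unload classes, then (in generational mode) mark old-gen as not parseable:
// unloading frees class metadata that dead objects in old regions may still
// reference, so those regions cannot be safely walked until they are repaired.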
void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
  if (mode()->is_generational()) {
    old_generation()->set_parseable(false);
  }
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  shenandoah_assert_generations_reconciled();
  gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under the update-refs
  // watermark (URWM), so we need to make them parsable for the update code to work
  // correctly. Plus, we can compute new sizes for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

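// Publish the canonical gc-state byte into each JavaThread's thread-local copy.
// This runs at a safepoint, so no Java thread can race with the update.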
void ShenandoahHeap::propagate_gc_state_to_java_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    _gc_state_changed = false;
    char state = gc_state();
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
      ShenandoahThreadLocalData::set_gc_state(t, state);
    }
  }
}

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
  // If concurrent weak roots are in progress, the active generation must be set.
  assert(!is_concurrent_weak_root_in_progress() || active_generation() != nullptr, "Error");
  shenandoah_assert_generations_reconciled();
}

void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
  uint mask;
  assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
  if (!in_progress && is_concurrent_old_mark_in_progress()) {
    assert(mode()->is_generational(), "Only generational GC has old marking");
    assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
    // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
    mask = YOUNG_MARKING;
  } else {
    mask = MARKING | YOUNG_MARKING;
  }
  set_gc_state(mask, in_progress);
  manage_satb_barrier(in_progress);
}

void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
#ifdef ASSERT
  // has_forwarded_objects() iff UPDATEREFS or EVACUATION
  bool has_forwarded = has_forwarded_objects();
  bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
  bool evacuating = _gc_state.is_set(EVACUATION);
  assert((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
         "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
#endif
  if (!in_progress && is_concurrent_young_mark_in_progress()) {
    // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
    assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
    set_gc_state(OLD_MARKING, in_progress);
  } else {
    set_gc_state(MARKING | OLD_MARKING, in_progress);
  }
  manage_satb_barrier(in_progress);
}

bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
  return old_generation()->is_preparing_for_mark();
}

void ShenandoahHeap::manage_satb_barrier(bool active) {
  if (is_concurrent_mark_in_progress()) {
    // Ignore requests to deactivate the barrier while concurrent mark is in progress.
    // Do not attempt to re-activate the barrier if it is already active.
    if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
      ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
    }
  } else {
    // No concurrent marking is in progress, so honor the request to deactivate,
    // but only if the barrier is currently active.
    if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
      ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
    }
  }
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

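// Atomically flip the cancellation flag from CANCELLABLE to CANCELLED. Returns
// true only for the single caller that wins the race to cancel the cycle.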
bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

void ShenandoahHeap::cancel_concurrent_mark() {
  if (mode()->is_generational()) {
    young_generation()->cancel_marking();
    old_generation()->cancel_marking();
  }

  global_generation()->cancel_marking();

  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
    _cancel_requested_time = os::elapsedTime();
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

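// Address of the biased collection-set membership map. Barrier fast paths test
// cset membership with a single indexed load off this address.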
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  if (mode()->is_generational()) {
    young_generation()->reset_bytes_allocated_since_gc_start();
    old_generation()->reset_bytes_allocated_since_gc_start();
  }

  global_generation()->reset_bytes_allocated_since_gc_start();
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert(is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

// Reconcile each region's "pinned" state with its current pin count, under the
// heap lock. The function head here is reconstructed around the surviving loop
// body, following the upstream shenandoahHeap.cpp shape.
void ShenandoahHeap::sync_pinned_region_status() {
  ShenandoahHeapLocker locker(lock());

  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    shenandoah_assert_generations_reconciled();
    if (gc_generation()->contains(r)) {
      assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
             "Region " SIZE_FORMAT " pinning status is inconsistent", i);
    }
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

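// Arm concurrent root processing: strong roots need work only when there is a
// collection set to evacuate; weak roots are always processed; class unloading
// is prepared only when requested.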
void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  if (unload_classes()) {
    _unloader.finish();
  }
}

// Reconstructed around the surviving else-branch, following the upstream
// shenandoahHeap.cpp shape: worker counts must match the phase we are in.
#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    // Use ParallelGCThreads inside safepoints
    assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
           ParallelGCThreads, nworkers);
  } else {
    // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
           ConcGCThreads, nworkers);
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert(_verifier != nullptr, "sanity");
  return _verifier;
}

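// Worker task for the update-references phase: walks live objects up to each
// region's update watermark and updates their references to point at evacuated
// copies. CONCURRENT selects suspendible workers and periodic cancellation checks.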
template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of
      // evacuation to the mutator free set. At the end of GC, we will have cset_regions newly evacuated, fully empty
      // regions from which we will be able to replenish the Collector free set and the OldCollector free set in
      // preparation for the next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding the Mutator free set.
    T cl;
    ShenandoahHeapRegion* r = _regions->next();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
        if (ShenandoahPacing) {
          _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
        }
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}

ShenandoahSynchronizePinnedRegionStates::ShenandoahSynchronizePinnedRegionStates() :
  _lock(ShenandoahHeap::heap()->lock()) { }

void ShenandoahSynchronizePinnedRegionStates::heap_region_do(ShenandoahHeapRegion* r) {
  // Drop "pinned" state from regions that no longer have a pinned count. Put
  // regions with a pinned count into the "pinned" state.
  if (r->is_active()) {
    if (r->is_pinned()) {
      if (r->pin_count() == 0) {
        ShenandoahHeapLocker locker(_lock);
        r->make_unpinned();
      }
    } else {
      if (r->pin_count() > 0) {
        ShenandoahHeapLocker locker(_lock);
        r->make_pinned();
      }
    }
  }
}

void ShenandoahHeap::update_heap_region_states(bool concurrent) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);

    final_update_refs_update_region_states();

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
    trash_cset_regions();
  }
}

void ShenandoahHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates cl;
  parallel_heap_region_iterate(&cl);
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
  ShenandoahGCPhase phase(concurrent ?
                          ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                          ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
  ShenandoahHeapLocker locker(lock());
  size_t young_cset_regions, old_cset_regions;
  size_t first_old_region, last_old_region, old_region_count;
  _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
  // If there are no old regions, first_old_region will be greater than last_old_region
  assert((first_old_region > last_old_region) ||
         ((last_old_region + 1 - first_old_region >= old_region_count) &&
          get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
         "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
         old_region_count, first_old_region, last_old_region);

  if (mode()->is_generational()) {
#ifdef ASSERT
    if (ShenandoahVerify) {
      verifier()->verify_before_rebuilding_free_set();
    }
#endif

    // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so consider all of this
    // available for transfer to old. Note that transfer of humongous regions does not impact available.
    ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
    size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
    gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);

    // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented
    // available memory represents more than 16 regions worth of data. Note that fragmentation may increase when we
    // promote regular regions in place, because many of those regions hold an abundance of available memory within
    // them. Fragmentation will decrease as promote-by-copy consumes the available memory within these partially
    // consumed regions.
    //
    // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
    // within partially consumed regions of memory.
  }
  // Rebuild the free set based on the adjusted generation sizes.
  _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);

  if (mode()->is_generational()) {
    ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
    ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
    old_gen->heuristics()->trigger_maybe(first_old_region, last_old_region, old_region_count, num_regions());
  }
}

void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  st->cr();
  print_heap_regions_on(st);
}

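// A single marking bitmap slice backs _bitmap_regions_per_slice heap regions, so
// the slice must stay committed while any region mapped to it is committed.
// skip_self excludes the queried region itself from the check.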
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->index() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert(g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->index()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }
  }
  return false;
}

void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return MemoryUsage(_initial_size, used(), committed(), max_capacity());
}

ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) return false;

  // Objects allocated after marking start are implicitly alive, and don't need any
  // barriers during the marking phase.
  if (is_concurrent_mark_in_progress() &&
      !marking_context()->allocated_after_mark_start(obj)) {
    return true;
  }

  // Cannot guarantee that obj is deeply good, i.e. that everything reachable
  // from it has already been evacuated and updated.
  if (has_forwarded_objects()) {
    return true;
  }

  return false;
}

ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
  if (!mode()->is_generational()) {
    return global_generation();
  } else if (affiliation == YOUNG_GENERATION) {
    return young_generation();
  } else if (affiliation == OLD_GENERATION) {
    return old_generation();
  }

  ShouldNotReachHere();
  return nullptr;
}

void ShenandoahHeap::log_heap_status(const char* msg) const {
  if (mode()->is_generational()) {
    young_generation()->log_status(msg);
    old_generation()->log_status(msg);
  } else {
    global_generation()->log_status(msg);
  }
}