1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "memory/allocation.hpp"
28 #include "memory/universe.hpp"
29
30 #include "gc/shared/classUnloadingContext.hpp"
31 #include "gc/shared/gcArguments.hpp"
32 #include "gc/shared/gcTimer.hpp"
33 #include "gc/shared/gcTraceTime.inline.hpp"
34 #include "gc/shared/locationPrinter.inline.hpp"
35 #include "gc/shared/memAllocator.hpp"
36 #include "gc/shared/plab.hpp"
37 #include "gc/shared/tlab_globals.hpp"
38
39 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
40 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
41 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
42 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
43 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
45 #include "gc/shenandoah/shenandoahControlThread.hpp"
46 #include "gc/shenandoah/shenandoahFreeSet.hpp"
47 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
48 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
49 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
50 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
51 #include "gc/shenandoah/shenandoahInitLogger.hpp"
52 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
53 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
54 #include "gc/shenandoah/shenandoahMetrics.hpp"
55 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
56 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
57 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
58 #include "gc/shenandoah/shenandoahPadding.hpp"
59 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
60 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
61 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
62 #include "gc/shenandoah/shenandoahSTWMark.hpp"
63 #include "gc/shenandoah/shenandoahUtils.hpp"
64 #include "gc/shenandoah/shenandoahVerifier.hpp"
65 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
66 #include "gc/shenandoah/shenandoahVMOperations.hpp"
67 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
68 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
69 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
70 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
71 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
72 #if INCLUDE_JFR
73 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
74 #endif
75
76 #include "classfile/systemDictionary.hpp"
77 #include "code/codeCache.hpp"
78 #include "memory/classLoaderMetaspace.hpp"
79 #include "memory/metaspaceUtils.hpp"
80 #include "nmt/mallocTracker.hpp"
81 #include "nmt/memTracker.hpp"
82 #include "oops/compressedOops.inline.hpp"
83 #include "prims/jvmtiTagMap.hpp"
84 #include "runtime/atomic.hpp"
85 #include "runtime/globals.hpp"
86 #include "runtime/interfaceSupport.inline.hpp"
87 #include "runtime/java.hpp"
88 #include "runtime/orderAccess.hpp"
89 #include "runtime/safepointMechanism.hpp"
90 #include "runtime/stackWatermarkSet.hpp"
91 #include "runtime/vmThread.hpp"
144 jint ShenandoahHeap::initialize() {
145 //
146 // Figure out heap sizing
147 //
148
149 size_t init_byte_size = InitialHeapSize;
150 size_t min_byte_size = MinHeapSize;
151 size_t max_byte_size = MaxHeapSize;
152 size_t heap_alignment = HeapAlignment;
153
154 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
155
156 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
157 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
158
159 _num_regions = ShenandoahHeapRegion::region_count();
160 assert(_num_regions == (max_byte_size / reg_size_bytes),
161 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
162 _num_regions, max_byte_size, reg_size_bytes);
163
164 // Now we know the number of regions, initialize the heuristics.
165 initialize_heuristics();
166
167 size_t num_committed_regions = init_byte_size / reg_size_bytes;
168 num_committed_regions = MIN2(num_committed_regions, _num_regions);
169 assert(num_committed_regions <= _num_regions, "sanity");
170 _initial_size = num_committed_regions * reg_size_bytes;
171
172 size_t num_min_regions = min_byte_size / reg_size_bytes;
173 num_min_regions = MIN2(num_min_regions, _num_regions);
174 assert(num_min_regions <= _num_regions, "sanity");
175 _minimum_size = num_min_regions * reg_size_bytes;
176
177 // Default to max heap size.
178 _soft_max_size = _num_regions * reg_size_bytes;
179
180 _committed = _initial_size;
181
182 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
183 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
184 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
185
186 //
187 // Reserve and commit memory for heap
188 //
189
190 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
191 initialize_reserved_region(heap_rs);
192 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
193 _heap_region_special = heap_rs.special();
194
195 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
196 "Misaligned heap: " PTR_FORMAT, p2i(base()));
197 os::trace_page_sizes_for_requested_size("Heap",
198 max_byte_size, heap_alignment,
199 heap_rs.base(),
200 heap_rs.size(), heap_rs.page_size());
201
202 #if SHENANDOAH_OPTIMIZED_MARKTASK
203 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
204 // Fail if we ever attempt to address more than we can.
205 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
206 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
207 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
208 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
209 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
210 vm_exit_during_initialization("Fatal Error", buf);
211 }
212 #endif
213
214 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
215 if (!_heap_region_special) {
216 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
217 "Cannot commit heap memory");
218 }
219
220 //
221 // Reserve and commit memory for bitmap(s)
222 //
223
224 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
225 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
226
227 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
228
229 guarantee(bitmap_bytes_per_region != 0,
230 "Bitmap bytes per region should not be zero");
231 guarantee(is_power_of_2(bitmap_bytes_per_region),
232 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
233
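  // When a single bitmap page spans several regions, commit/uncommit must happen in
  // page-sized slices covering all of those regions; otherwise each region gets its
  // own bitmap slice.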
234 if (bitmap_page_size > bitmap_bytes_per_region) {
235 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
236 _bitmap_bytes_per_slice = bitmap_page_size;
237 } else {
238 _bitmap_regions_per_slice = 1;
239 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
240 }
241
242 guarantee(_bitmap_regions_per_slice >= 1,
243 "Should have at least one region per slice: " SIZE_FORMAT,
244 _bitmap_regions_per_slice);
245
246 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
247 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
248 _bitmap_bytes_per_slice, bitmap_page_size);
249
250 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
251 os::trace_page_sizes_for_requested_size("Mark Bitmap",
252 bitmap_size_orig, bitmap_page_size,
253 bitmap.base(),
254 bitmap.size(), bitmap.page_size());
255 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
256 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
257 _bitmap_region_special = bitmap.special();
258
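  // Commit only the bitmap slices that cover the initially committed regions,
  // rounded up to whole slices and capped at the full bitmap size.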
259 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
260 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
261 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
262 if (!_bitmap_region_special) {
263 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
264 "Cannot commit bitmap memory");
265 }
266
267 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
268
269 if (ShenandoahVerify) {
270 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
271 os::trace_page_sizes_for_requested_size("Verify Bitmap",
272 bitmap_size_orig, bitmap_page_size,
273 verify_bitmap.base(),
274 verify_bitmap.size(), verify_bitmap.page_size());
275 if (!verify_bitmap.special()) {
276 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
277 "Cannot commit verification bitmap memory");
278 }
279 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
280 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
281 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
282 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
283 }
284
285 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
286 size_t aux_bitmap_page_size = bitmap_page_size;
287 #ifdef LINUX
338 assert(is_aligned(req_addr, cset_align), "Should be aligned");
339 cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
340 if (cset_rs.is_reserved()) {
341 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
342 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
343 break;
344 }
345 }
346
347 if (_collection_set == nullptr) {
348 cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
349 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
350 }
351 os::trace_page_sizes_for_requested_size("Collection Set",
352 cset_size, cset_page_size,
353 cset_rs.base(),
354 cset_rs.size(), cset_rs.page_size());
355 }
356
357 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
358 _free_set = new ShenandoahFreeSet(this, _num_regions);
359
360 {
361 ShenandoahHeapLocker locker(lock());
362
363 for (size_t i = 0; i < _num_regions; i++) {
364 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
365 bool is_committed = i < num_committed_regions;
366 void* loc = region_storage.base() + i * region_align;
367
368 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
369 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
370
371 _marking_context->initialize_top_at_mark_start(r);
372 _regions[i] = r;
373 assert(!collection_set()->is_in(i), "New region should not be in collection set");
374 }
375
376 // Initialize to complete
377 _marking_context->mark_complete();
378
379 _free_set->rebuild();
380 }
381
382 if (AlwaysPreTouch) {
383     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
384     // before initialize() below zeroes it with the initializing thread. For any given region,
385     // we touch the region and the corresponding bitmaps from the same thread.
386 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
387
388 _pretouch_heap_page_size = heap_page_size;
389 _pretouch_bitmap_page_size = bitmap_page_size;
390
391 #ifdef LINUX
392     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
393     // pages. But the kernel needs to know that every small page is used, in order to coalesce
394     // them into huge ones. Therefore, we need to pretouch with smaller pages.
395 if (UseTransparentHugePages) {
396 _pretouch_heap_page_size = (size_t)os::vm_page_size();
397 _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
398 }
399 #endif
418 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
419 }
420
421 // There should probably be Shenandoah-specific options for these,
422 // just as there are G1-specific options.
423 {
424 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
425 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
426 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
427 }
428
429 _monitoring_support = new ShenandoahMonitoringSupport(this);
430 _phase_timings = new ShenandoahPhaseTimings(max_workers());
431 ShenandoahCodeRoots::initialize();
432
433 if (ShenandoahPacing) {
434 _pacer = new ShenandoahPacer(this);
435 _pacer->setup_for_idle();
436 }
437
438 _control_thread = new ShenandoahControlThread();
439
440 ShenandoahInitLogger::print();
441
442 return JNI_OK;
443 }
444
445 void ShenandoahHeap::initialize_mode() {
446 if (ShenandoahGCMode != nullptr) {
447 if (strcmp(ShenandoahGCMode, "satb") == 0) {
448 _gc_mode = new ShenandoahSATBMode();
449 } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
450 _gc_mode = new ShenandoahIUMode();
451 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
452 _gc_mode = new ShenandoahPassiveMode();
453 } else {
454 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
455 }
456 } else {
457 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
458 }
459 _gc_mode->initialize_flags();
460 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
461 vm_exit_during_initialization(
462 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
463 _gc_mode->name()));
464 }
465 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
466 vm_exit_during_initialization(
467 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
468 _gc_mode->name()));
469 }
470 }
471
472 void ShenandoahHeap::initialize_heuristics() {
473 assert(_gc_mode != nullptr, "Must be initialized");
474 _heuristics = _gc_mode->initialize_heuristics();
475
476 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
477 vm_exit_during_initialization(
478 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
479 _heuristics->name()));
480 }
481 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
482 vm_exit_during_initialization(
483 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
484 _heuristics->name()));
485 }
486 }
487
488 #ifdef _MSC_VER
489 #pragma warning( push )
490 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
491 #endif
492
493 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
494 CollectedHeap(),
495 _initial_size(0),
496 _used(0),
497 _committed(0),
498 _bytes_allocated_since_gc_start(0),
499 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
500 _workers(nullptr),
501 _safepoint_workers(nullptr),
502 _heap_region_special(false),
503 _num_regions(0),
504 _regions(nullptr),
505 _update_refs_iterator(this),
506 _gc_state_changed(false),
507 _gc_no_progress_count(0),
508 _control_thread(nullptr),
509 _shenandoah_policy(policy),
510 _gc_mode(nullptr),
511 _heuristics(nullptr),
512 _free_set(nullptr),
513 _pacer(nullptr),
514 _verifier(nullptr),
515 _phase_timings(nullptr),
516 _monitoring_support(nullptr),
517 _memory_pool(nullptr),
518 _stw_memory_manager("Shenandoah Pauses"),
519 _cycle_memory_manager("Shenandoah Cycles"),
520 _gc_timer(new ConcurrentGCTimer()),
521 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
522 _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
523 _marking_context(nullptr),
524 _bitmap_size(0),
525 _bitmap_regions_per_slice(0),
526 _bitmap_bytes_per_slice(0),
527 _bitmap_region_special(false),
528 _aux_bitmap_region_special(false),
529 _liveness_cache(nullptr),
530 _collection_set(nullptr)
531 {
532 // Initialize GC mode early, so we can adjust barrier support
533 initialize_mode();
534 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
535
536 _max_workers = MAX2(_max_workers, 1U);
537 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
538 if (_workers == nullptr) {
539 vm_exit_during_initialization("Failed necessary allocation.");
540 } else {
541 _workers->initialize_workers();
542 }
543
544 if (ParallelGCThreads > 1) {
545 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
546 ParallelGCThreads);
547 _safepoint_workers->initialize_workers();
548 }
549 }
550
551 #ifdef _MSC_VER
552 #pragma warning( pop )
553 #endif
554
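// Clears the mark bitmap for every region whose bitmap slice is committed.
// Regions are handed out to workers through the shared region iterator.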
555 class ShenandoahResetBitmapTask : public WorkerTask {
556 private:
557 ShenandoahRegionIterator _regions;
558
559 public:
560 ShenandoahResetBitmapTask() :
561 WorkerTask("Shenandoah Reset Bitmap") {}
562
563 void work(uint worker_id) {
564 ShenandoahHeapRegion* region = _regions.next();
565 ShenandoahHeap* heap = ShenandoahHeap::heap();
566 ShenandoahMarkingContext* const ctx = heap->marking_context();
567 while (region != nullptr) {
568 if (heap->is_bitmap_slice_committed(region)) {
569 ctx->clear_bitmap(region);
570 }
571 region = _regions.next();
572 }
573 }
574 };
575
576 void ShenandoahHeap::reset_mark_bitmap() {
577 assert_gc_workers(_workers->active_workers());
578 mark_incomplete_marking_context();
579
580 ShenandoahResetBitmapTask task;
581 _workers->run_task(&task);
582 }
583
584 void ShenandoahHeap::print_on(outputStream* st) const {
585 st->print_cr("Shenandoah Heap");
586 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
587 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
588 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
589 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
590 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
591 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
592 num_regions(),
593 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
594 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
595
596 st->print("Status: ");
597 if (has_forwarded_objects()) st->print("has forwarded objects, ");
598 if (is_concurrent_mark_in_progress()) st->print("marking, ");
599 if (is_evacuation_in_progress()) st->print("evacuating, ");
600 if (is_update_refs_in_progress()) st->print("updating refs, ");
601 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
602 if (is_full_gc_in_progress()) st->print("full gc, ");
603 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
604 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
605 if (is_concurrent_strong_root_in_progress() &&
606 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
607
608 if (cancelled_gc()) {
609 st->print("cancelled");
610 } else {
611 st->print("not cancelled");
612 }
613 st->cr();
614
615 st->print_cr("Reserved region:");
616 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
617 p2i(reserved_region().start()),
618 p2i(reserved_region().end()));
629 st->cr();
630 MetaspaceUtils::print_on(st);
631
632 if (Verbose) {
633 st->cr();
634 print_heap_regions_on(st);
635 }
636 }
637
638 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
639 public:
640 void do_thread(Thread* thread) {
641 assert(thread != nullptr, "Sanity");
642 assert(thread->is_Worker_thread(), "Only worker thread expected");
643 ShenandoahThreadLocalData::initialize_gclab(thread);
644 }
645 };
646
647 void ShenandoahHeap::post_initialize() {
648 CollectedHeap::post_initialize();
649 MutexLocker ml(Threads_lock);
650
651 ShenandoahInitWorkerGCLABClosure init_gclabs;
652 _workers->threads_do(&init_gclabs);
653
654   // The GCLAB cannot be initialized early during VM startup, because it cannot determine its max_size yet.
655   // Instead, let WorkerThreads initialize the GCLAB when a new worker is created.
656 _workers->set_initialize_gclab();
657 if (_safepoint_workers != nullptr) {
658 _safepoint_workers->threads_do(&init_gclabs);
659 _safepoint_workers->set_initialize_gclab();
660 }
661
662 _heuristics->initialize();
663
664 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
665 }
666
667 size_t ShenandoahHeap::used() const {
668 return Atomic::load(&_used);
669 }
670
671 size_t ShenandoahHeap::committed() const {
672 return Atomic::load(&_committed);
673 }
674
675 size_t ShenandoahHeap::available() const {
676 return free_set()->available();
677 }
678
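// _committed is only changed under the heap lock or at a safepoint (see the asserts below),
// while _used is maintained with relaxed atomics on the allocation fast path.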
679 void ShenandoahHeap::increase_committed(size_t bytes) {
680 shenandoah_assert_heaplocked_or_safepoint();
681 _committed += bytes;
682 }
683
684 void ShenandoahHeap::decrease_committed(size_t bytes) {
685 shenandoah_assert_heaplocked_or_safepoint();
686 _committed -= bytes;
687 }
688
689 void ShenandoahHeap::increase_used(size_t bytes) {
690 Atomic::add(&_used, bytes, memory_order_relaxed);
691 }
692
693 void ShenandoahHeap::set_used(size_t bytes) {
694 Atomic::store(&_used, bytes);
695 }
696
697 void ShenandoahHeap::decrease_used(size_t bytes) {
698 assert(used() >= bytes, "never decrease heap size by more than we've left");
699 Atomic::sub(&_used, bytes, memory_order_relaxed);
700 }
701
702 void ShenandoahHeap::increase_allocated(size_t bytes) {
703 Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
704 }
705
706 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
707 size_t bytes = words * HeapWordSize;
708 if (!waste) {
709 increase_used(bytes);
710 }
711 increase_allocated(bytes);
712 if (ShenandoahPacing) {
713 control_thread()->pacing_notify_alloc(words);
714 if (waste) {
715 pacer()->claim_for_alloc(words, true);
716 }
717 }
718 }
719
720 size_t ShenandoahHeap::capacity() const {
721 return committed();
722 }
723
724 size_t ShenandoahHeap::max_capacity() const {
725 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
726 }
727
728 size_t ShenandoahHeap::soft_max_capacity() const {
729 size_t v = Atomic::load(&_soft_max_size);
730 assert(min_capacity() <= v && v <= max_capacity(),
731 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
732 min_capacity(), v, max_capacity());
733 return v;
734 }
735
736 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
737 assert(min_capacity() <= v && v <= max_capacity(),
738 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
739 min_capacity(), v, max_capacity());
740 Atomic::store(&_soft_max_size, v);
741 }
742
743 size_t ShenandoahHeap::min_capacity() const {
744 return _minimum_size;
745 }
746
747 size_t ShenandoahHeap::initial_capacity() const {
748 return _initial_size;
749 }
750
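// The heap is a single contiguous range of regions, so a plain bounds check is sufficient.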
751 bool ShenandoahHeap::is_in(const void* p) const {
752 HeapWord* heap_base = (HeapWord*) base();
753 HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
754 return p >= heap_base && p < last_region_end;
755 }
756
757 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
758 assert (ShenandoahUncommit, "should be enabled");
759
760   // Determine if there is work to do. This avoids taking the heap lock if there is
761   // no work available, avoids spamming logs with superfluous messages,
762   // and minimizes the amount of work done while locks are taken.
763
764 if (committed() <= shrink_until) return;
765
766 bool has_work = false;
767 for (size_t i = 0; i < num_regions(); i++) {
768 ShenandoahHeapRegion* r = get_region(i);
769 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
770 has_work = true;
771 break;
772 }
773 }
774
775 if (has_work) {
776 static const char* msg = "Concurrent uncommit";
816 size_t old_soft_max = soft_max_capacity();
817 if (new_soft_max != old_soft_max) {
818 new_soft_max = MAX2(min_capacity(), new_soft_max);
819 new_soft_max = MIN2(max_capacity(), new_soft_max);
820 if (new_soft_max != old_soft_max) {
821 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
822 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
823 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
824 );
825 set_soft_max_capacity(new_soft_max);
826 return true;
827 }
828 }
829 return false;
830 }
831
832 void ShenandoahHeap::notify_heap_changed() {
833   // Update monitoring counters when we take a new region. This amortizes the
834   // update costs on the slow path.
835 monitoring_support()->notify_heap_changed();
836
837   // This is called from the allocation path, and thus should be fast.
838 _heap_changed.try_set();
839 }
840
841 void ShenandoahHeap::set_forced_counters_update(bool value) {
842 monitoring_support()->set_forced_counters_update(value);
843 }
844
845 void ShenandoahHeap::handle_force_counters_update() {
846 monitoring_support()->handle_force_counters_update();
847 }
848
849 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
850 // New object should fit the GCLAB size
851 size_t min_size = MAX2(size, PLAB::min_size());
852
853 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
854 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
855 new_size = MIN2(new_size, PLAB::max_size());
856 new_size = MAX2(new_size, PLAB::min_size());
857
858 // Record new heuristic value even if we take any shortcut. This captures
859 // the case when moderately-sized objects always take a shortcut. At some point,
860 // heuristics should catch up with them.
861 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
862
863 if (new_size < size) {
864 // New size still does not fit the object. Fall back to shared allocation.
865 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
866 return nullptr;
867 }
868
869 // Retire current GCLAB, and allocate a new one.
870 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
871 gclab->retire();
872
873 size_t actual_size = 0;
874 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
875 if (gclab_buf == nullptr) {
876 return nullptr;
877 }
878
879 assert (size <= actual_size, "allocation should fit");
880
881   // ...and clear or zap the just-allocated GCLAB, if needed.
882 if (ZeroTLAB) {
883 Copy::zero_to_words(gclab_buf, actual_size);
884 } else if (ZapTLAB) {
885 // Skip mangling the space corresponding to the object header to
886 // ensure that the returned space is not considered parsable by
887 // any concurrent GC thread.
888 size_t hdr_size = oopDesc::header_size();
889 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
890 }
891 gclab->set_buf(gclab_buf, actual_size);
892 return gclab->allocate(size);
893 }
894
895 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
896 size_t requested_size,
897 size_t* actual_size) {
898 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
899 HeapWord* res = allocate_memory(req);
900 if (res != nullptr) {
901 *actual_size = req.actual_size();
902 } else {
903 *actual_size = 0;
904 }
905 return res;
906 }
907
908 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
909 size_t word_size,
910 size_t* actual_size) {
911 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
912 HeapWord* res = allocate_memory(req);
913 if (res != nullptr) {
914 *actual_size = req.actual_size();
915 } else {
916 *actual_size = 0;
917 }
918 return res;
919 }
920
921 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
922 intptr_t pacer_epoch = 0;
923 bool in_new_region = false;
924 HeapWord* result = nullptr;
925
926 if (req.is_mutator_alloc()) {
927 if (ShenandoahPacing) {
928 pacer()->pace_for_alloc(req.size());
929 pacer_epoch = pacer()->epoch();
930 }
931
932 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
933 result = allocate_memory_under_lock(req, in_new_region);
934 }
935
936 // Check that gc overhead is not exceeded.
937 //
938 // Shenandoah will grind along for quite a while allocating one
939     // object at a time using shared (non-TLAB) allocations. This check
940     // tests that the GC overhead limit has not been exceeded.
941 // This will notify the collector to start a cycle, but will raise
942 // an OOME to the mutator if the last Full GCs have not made progress.
943 if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
944 control_thread()->handle_alloc_failure(req, false);
945 return nullptr;
946 }
947
948 // Block until control thread reacted, then retry allocation.
949 //
950     // It might happen that one of the threads requesting allocation unblocks
951     // long after the GC has happened, only to fail its second allocation, because
952 // other threads have already depleted the free storage. In this case, a better
953 // strategy is to try again, as long as GC makes progress (or until at least
954 // one full GC has completed).
955 size_t original_count = shenandoah_policy()->full_gc_count();
956 while (result == nullptr
957 && (get_gc_no_progress_count() == 0 || original_count == shenandoah_policy()->full_gc_count())) {
958 control_thread()->handle_alloc_failure(req);
959 result = allocate_memory_under_lock(req, in_new_region);
960 }
961
962 if (log_is_enabled(Debug, gc, alloc)) {
963 ResourceMark rm;
964 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
965 Thread::current()->name(), p2i(result), req.type_string(), req.size(), original_count, get_gc_no_progress_count());
966 }
967 } else {
968 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
969 result = allocate_memory_under_lock(req, in_new_region);
970 // Do not call handle_alloc_failure() here, because we cannot block.
971 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
972 }
973
974 if (in_new_region) {
975 notify_heap_changed();
976 }
977
978 if (result != nullptr) {
979 size_t requested = req.size();
980 size_t actual = req.actual_size();
981
982 assert (req.is_lab_alloc() || (requested == actual),
983 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
984 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
985
986 if (req.is_mutator_alloc()) {
987 notify_mutator_alloc_words(actual, false);
988
989 // If we requested more than we were granted, give the rest back to pacer.
990 // This only matters if we are in the same pacing epoch: do not try to unpace
991 // over the budget for the other phase.
992 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
993 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
994 }
995 } else {
996 increase_used(actual*HeapWordSize);
997 }
998 }
999
1000 return result;
1001 }
1002
1003 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1004 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1005 // We cannot block for safepoint for GC allocations, because there is a high chance
1006 // we are already running at safepoint or from stack watermark machinery, and we cannot
1007 // block again.
1008 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1009 return _free_set->allocate(req, in_new_region);
1010 }
1011
1012 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1013 bool* gc_overhead_limit_was_exceeded) {
1014 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1015 return allocate_memory(req);
1016 }
1017
1018 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1019 size_t size,
1020 Metaspace::MetadataType mdtype) {
1021 MetaWord* result;
1022
1023 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1024 if (heuristics()->can_unload_classes()) {
1025 ShenandoahHeuristics* h = heuristics();
1026 h->record_metaspace_oom();
1027 }
1028
1029 // Expand and retry allocation
1030 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1031 if (result != nullptr) {
1032 return result;
1033 }
1034
1035 // Start full GC
1036 collect(GCCause::_metadata_GC_clear_soft_refs);
1037
1038 // Retry allocation
1039 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1040 if (result != nullptr) {
1041 return result;
1042 }
1043
1044 // Expand and retry allocation
1045 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1098 private:
1099 void do_work() {
1100 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1101 ShenandoahHeapRegion* r;
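    // Workers claim collection set regions one at a time, evacuate their live objects,
    // report pacing progress, and poll for cancellation between regions.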
1102 while ((r =_cs->claim_next()) != nullptr) {
1103 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1104 _sh->marked_object_iterate(r, &cl);
1105
1106 if (ShenandoahPacing) {
1107 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1108 }
1109
1110 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1111 break;
1112 }
1113 }
1114 }
1115 };
1116
1117 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1118 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1119 workers()->run_task(&task);
1120 }
1121
1122 void ShenandoahHeap::trash_cset_regions() {
1123 ShenandoahHeapLocker locker(lock());
1124
1125 ShenandoahCollectionSet* set = collection_set();
1126 ShenandoahHeapRegion* r;
1127 set->clear_current_index();
1128 while ((r = set->next()) != nullptr) {
1129 r->make_trash();
1130 }
1131 collection_set()->clear();
1132 }
1133
1134 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1135 st->print_cr("Heap Regions:");
1136 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1137 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1138 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1139 st->print_cr("UWM=update watermark, U=used");
1140 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1141 st->print_cr("S=shared allocs, L=live data");
1142 st->print_cr("CP=critical pins");
1143
1144 for (size_t i = 0; i < num_regions(); i++) {
1145 get_region(i)->print_on(st);
1146 }
1147 }
1148
1149 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1150 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1151
1152 oop humongous_obj = cast_to_oop(start->bottom());
1153 size_t size = humongous_obj->size();
1154 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1155 size_t index = start->index() + required_regions - 1;
1156
1157 assert(!start->has_live(), "liveness must be zero");
1158
1159 for(size_t i = 0; i < required_regions; i++) {
1160     // Reclaim from the tail. Otherwise, an assertion fails when printing a region to the trace log,
1161     // because the printer expects every humongous continuation region to follow a live humongous start region.
1162 ShenandoahHeapRegion* region = get_region(index --);
1163
1164 assert(region->is_humongous(), "expect correct humongous start or continuation");
1165 assert(!region->is_cset(), "Humongous region should not be in collection set");
1166
1167 region->make_trash_immediate();
1168 }
1169 }
1170
1171 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1172 public:
1173 ShenandoahCheckCleanGCLABClosure() {}
1174 void do_thread(Thread* thread) {
1175 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1176 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1177 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1178 }
1179 };
1180
1181 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1182 private:
1183 bool const _resize;
1184 public:
1185 ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1186 void do_thread(Thread* thread) {
1187 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1188 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1189 gclab->retire();
1190 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1191 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1192 }
1193 }
1194 };
1195
1196 void ShenandoahHeap::labs_make_parsable() {
1197 assert(UseTLAB, "Only call with UseTLAB");
1198
1199 ShenandoahRetireGCLABClosure cl(false);
1200
1201 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1202 ThreadLocalAllocBuffer& tlab = t->tlab();
1203 tlab.make_parsable();
1204 cl.do_thread(t);
1205 }
1206
1207 workers()->threads_do(&cl);
1208 }
1209
1210 void ShenandoahHeap::tlabs_retire(bool resize) {
1211 assert(UseTLAB, "Only call with UseTLAB");
1212 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1232 #endif
1233 }
1234
1235 void ShenandoahHeap::gclabs_retire(bool resize) {
1236 assert(UseTLAB, "Only call with UseTLAB");
1237 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1238
1239 ShenandoahRetireGCLABClosure cl(resize);
1240 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1241 cl.do_thread(t);
1242 }
1243 workers()->threads_do(&cl);
1244
1245 if (safepoint_workers() != nullptr) {
1246 safepoint_workers()->threads_do(&cl);
1247 }
1248 }
1249
1250 // Returns size in bytes
1251 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1252 // Return the max allowed size, and let the allocation path
1253 // figure out the safe size for current allocation.
1254 return ShenandoahHeapRegion::max_tlab_size_bytes();
1255 }
1256
1257 size_t ShenandoahHeap::max_tlab_size() const {
1258 // Returns size in words
1259 return ShenandoahHeapRegion::max_tlab_size_words();
1260 }
1261
1262 void ShenandoahHeap::collect(GCCause::Cause cause) {
1263 control_thread()->request_gc(cause);
1264 }
1265
1266 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1267 //assert(false, "Shouldn't need to do full collections");
1268 }
1269
1270 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1271 ShenandoahHeapRegion* r = heap_region_containing(addr);
1272 if (r != nullptr) {
1273 return r->block_start(addr);
1274 }
1275 return nullptr;
1276 }
1277
1278 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1279 ShenandoahHeapRegion* r = heap_region_containing(addr);
1280 return r->block_is_obj(addr);
1281 }
1282
1283 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1284 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1285 }
1286
1287 void ShenandoahHeap::prepare_for_verify() {
1288 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1289 labs_make_parsable();
1290 }
1291 }
1292
1293 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1294 tcl->do_thread(_control_thread);
1295 workers()->threads_do(tcl);
1296 if (_safepoint_workers != nullptr) {
1297 _safepoint_workers->threads_do(tcl);
1298 }
1299 }
1300
1301 void ShenandoahHeap::print_tracing_info() const {
1302 LogTarget(Info, gc, stats) lt;
1303 if (lt.is_enabled()) {
1304 ResourceMark rm;
1305 LogStream ls(lt);
1306
1307 phase_timings()->print_global_on(&ls);
1308
1309 ls.cr();
1310 ls.cr();
1311
1312 shenandoah_policy()->print_gc_stats(&ls);
1313
1314 ls.cr();
1315 ls.cr();
1316 }
1317 }
1318
1319 void ShenandoahHeap::verify(VerifyOption vo) {
1320 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1321 if (ShenandoahVerify) {
1322 verifier()->verify_generic(vo);
1323 } else {
1324 // TODO: Consider allocating verification bitmaps on demand,
1325 // and turn this on unconditionally.
1326 }
1327 }
1328 }
1329 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1330 return _free_set->capacity();
1331 }
1332
1333 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1334 private:
1335 MarkBitMap* _bitmap;
1336 ShenandoahScanObjectStack* _oop_stack;
1337 ShenandoahHeap* const _heap;
1338 ShenandoahMarkingContext* const _marking_context;
1620 if (start >= max) break;
1621
1622 for (size_t i = cur; i < end; i++) {
1623 ShenandoahHeapRegion* current = _heap->get_region(i);
1624 _blk->heap_region_do(current);
1625 }
1626 }
1627 }
1628 };
1629
1630 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1631 assert(blk->is_thread_safe(), "Only thread-safe closures here");
1632 if (num_regions() > ShenandoahParallelRegionStride) {
1633 ShenandoahParallelHeapRegionTask task(blk);
1634 workers()->run_task(&task);
1635 } else {
1636 heap_region_iterate(blk);
1637 }
1638 }
1639
1640 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1641 private:
1642 ShenandoahMarkingContext* const _ctx;
1643 public:
1644 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1645
1646 void heap_region_do(ShenandoahHeapRegion* r) {
1647 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1648 if (r->is_active()) {
1649       // Check if the region needs its TAMS updated. We have already updated it during concurrent
1650       // reset, so it is very likely we don't need to do another write here.
1651 if (_ctx->top_at_mark_start(r) != r->top()) {
1652 _ctx->capture_top_at_mark_start(r);
1653 }
1654 } else {
1655 assert(_ctx->top_at_mark_start(r) == r->top(),
1656 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1657 }
1658 }
1659
1660 bool is_thread_safe() { return true; }
1661 };
1662
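// Empty handshake closure: executing it forces every Java thread through a handshake poll,
// which serves as a synchronization point between the GC and the mutators.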
1663 class ShenandoahRendezvousClosure : public HandshakeClosure {
1664 public:
1665 inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1666 inline void do_thread(Thread* thread) {}
1667 };
1668
1669 void ShenandoahHeap::rendezvous_threads() {
1670 ShenandoahRendezvousClosure cl;
1671 Handshake::execute(&cl);
1672 }
1673
1674 void ShenandoahHeap::recycle_trash() {
1675 free_set()->recycle_trash();
1676 }
1677
1678 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1679 private:
1680 ShenandoahMarkingContext* const _ctx;
1681 public:
1682 ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1683
1684 void heap_region_do(ShenandoahHeapRegion* r) {
1685 if (r->is_active()) {
1686       // Reset live data and set TAMS optimistically. We recheck these under the pause
1687       // anyway, to capture any updates that happen in the meantime.
1688 r->clear_live_data();
1689 _ctx->capture_top_at_mark_start(r);
1690 }
1691 }
1692
1693 bool is_thread_safe() { return true; }
1694 };
1695
1696 void ShenandoahHeap::prepare_gc() {
1697 reset_mark_bitmap();
1698
1699 ShenandoahResetUpdateRegionStateClosure cl;
1700 parallel_heap_region_iterate(&cl);
1701 }
1702
1703 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1704 private:
1705 ShenandoahMarkingContext* const _ctx;
1706 ShenandoahHeapLock* const _lock;
1707
1708 public:
1709 ShenandoahFinalMarkUpdateRegionStateClosure() :
1710 _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1711
1712 void heap_region_do(ShenandoahHeapRegion* r) {
1713 if (r->is_active()) {
1714 // All allocations past TAMS are implicitly live, adjust the region data.
1715 // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1716 HeapWord *tams = _ctx->top_at_mark_start(r);
1717 HeapWord *top = r->top();
1718 if (top > tams) {
1719 r->increase_live_data_alloc_words(pointer_delta(top, tams));
1720 }
1721
1722       // We are about to select the collection set, so make sure it knows about
1723       // the current pinning status. This also allows trashing more regions whose
1724       // pinning status has since been dropped.
1725 if (r->is_pinned()) {
1726 if (r->pin_count() == 0) {
1727 ShenandoahHeapLocker locker(_lock);
1728 r->make_unpinned();
1729 }
1730 } else {
1731 if (r->pin_count() > 0) {
1732 ShenandoahHeapLocker locker(_lock);
1733 r->make_pinned();
1734 }
1735 }
1736
1737 // Remember limit for updating refs. It's guaranteed that we get no
1738 // from-space-refs written from here on.
1739 r->set_update_watermark_at_safepoint(r->top());
1740 } else {
1741 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1742 assert(_ctx->top_at_mark_start(r) == r->top(),
1743 "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1744 }
1745 }
1746
1747 bool is_thread_safe() { return true; }
1748 };
1749
1750 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1751 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1752 {
1753 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1754 ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1755 ShenandoahFinalMarkUpdateRegionStateClosure cl;
1756 parallel_heap_region_iterate(&cl);
1757
1758 assert_pinned_region_status();
1759 }
1760
1761 {
1762 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1763 ShenandoahPhaseTimings::degen_gc_choose_cset);
1764 ShenandoahHeapLocker locker(lock());
1765 _collection_set->clear();
1766 heuristics()->choose_collection_set(_collection_set);
1767 }
1768
1769 {
1770 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1771 ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1772 ShenandoahHeapLocker locker(lock());
1773 _free_set->rebuild();
1774 }
1775 }
1776
1777 void ShenandoahHeap::do_class_unloading() {
1778 _unloader.unload();
1779 }
1780
1781 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1782 // Weak refs processing
1783 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1784 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1785 ShenandoahTimingsTracker t(phase);
1786 ShenandoahGCWorkerPhase worker_phase(phase);
1787 ref_processor()->process_references(phase, workers(), false /* concurrent */);
1788 }
1789
1790 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1791 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1792
1793 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1794 // make them parsable for update code to work correctly. Plus, we can compute new sizes
1795 // for future GCLABs here.
1796 if (UseTLAB) {
1797 ShenandoahGCPhase phase(concurrent ?
1798 ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1799 ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1800 gclabs_retire(ResizeTLAB);
1801 }
1802
1803 _update_refs_iterator.reset();
1804 }
1805
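// Publish the latest gc-state bits into each Java thread's thread-local copy,
// so that barrier fast paths observe the new state after this safepoint.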
1806 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1807 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1808 if (_gc_state_changed) {
1809 _gc_state_changed = false;
1810 char state = gc_state();
1811 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1812 ShenandoahThreadLocalData::set_gc_state(t, state);
1813 }
1814 }
1815 }
1816
1817 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1818 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1819 _gc_state.set_cond(mask, value);
1820 _gc_state_changed = true;
1821 }
1822
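// SATB queues are activated/deactivated together with the marking state, so that
// mutators record pre-write values only while concurrent marking is in progress.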
1823 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1824 assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1825 set_gc_state(MARKING, in_progress);
1826 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1827 }
1828
1829 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1830 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1831 set_gc_state(EVACUATION, in_progress);
1832 }
1833
1834 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1835 if (in_progress) {
1836 _concurrent_strong_root_in_progress.set();
1837 } else {
1838 _concurrent_strong_root_in_progress.unset();
1839 }
1840 }
1841
1842 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1843 set_gc_state(WEAK_ROOTS, cond);
1844 }
1845
1846 GCTracer* ShenandoahHeap::tracer() {
1847 return shenandoah_policy()->tracer();
1848 }
1849
1850 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1851 return _free_set->used();
1852 }
1853
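// Atomically flip the cancellation flag from CANCELLABLE to CANCELLED.
// Only the thread that wins the CAS observes the transition and reports the cancellation.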
1854 bool ShenandoahHeap::try_cancel_gc() {
1855 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1856 return prev == CANCELLABLE;
1857 }
1858
1859 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1860 if (try_cancel_gc()) {
1861 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1862 log_info(gc)("%s", msg.buffer());
1863 Events::log(Thread::current(), "%s", msg.buffer());
1864 }
1865 }
1866
1867 uint ShenandoahHeap::max_workers() {
1868 return _max_workers;
1869 }
1870
1871 void ShenandoahHeap::stop() {
1872   // The shutdown sequence should be able to terminate even while a GC cycle is running.
1873
1874 // Step 0. Notify policy to disable event recording.
1875 _shenandoah_policy->record_shutdown();
1876
1877 // Step 1. Notify control thread that we are in shutdown.
1878 // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1879 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1880 control_thread()->prepare_for_graceful_shutdown();
1881
1882 // Step 2. Notify GC workers that we are cancelling GC.
1883 cancel_gc(GCCause::_shenandoah_stop_vm);
1884
1885 // Step 3. Wait until GC worker exits normally.
1886 control_thread()->stop();
1887 }
1888
1889 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1890 if (!unload_classes()) return;
1891 ClassUnloadingContext ctx(_workers->active_workers(),
1892 true /* unregister_nmethods_during_purge */,
1893 false /* lock_nmethod_free_separately */);
1894
1895 // Unload classes and purge SystemDictionary.
1896 {
1897 ShenandoahPhaseTimings::Phase phase = full_gc ?
1898 ShenandoahPhaseTimings::full_gc_purge_class_unload :
1899 ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1900 ShenandoahIsAliveSelector is_alive;
1901 {
1902 CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
1903 ShenandoahGCPhase gc_phase(phase);
1904 ShenandoahGCWorkerPhase worker_phase(phase);
1905 bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
1967 }
1968
1969 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1970 set_gc_state(HAS_FORWARDED, cond);
1971 }
1972
1973 void ShenandoahHeap::set_unload_classes(bool uc) {
1974 _unload_classes.set_cond(uc);
1975 }
1976
1977 bool ShenandoahHeap::unload_classes() const {
1978 return _unload_classes.is_set();
1979 }
1980
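// Address of the biased collection-set membership map, used by barrier code to test
// whether an object is in the collection set with a single byte load.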
1981 address ShenandoahHeap::in_cset_fast_test_addr() {
1982 ShenandoahHeap* heap = ShenandoahHeap::heap();
1983 assert(heap->collection_set() != nullptr, "Sanity");
1984 return (address) heap->collection_set()->biased_map_address();
1985 }
1986
1987 size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
1988 return Atomic::load(&_bytes_allocated_since_gc_start);
1989 }
1990
1991 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1992 Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1993 }
1994
1995 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1996 _degenerated_gc_in_progress.set_cond(in_progress);
1997 }
1998
1999 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2000 _full_gc_in_progress.set_cond(in_progress);
2001 }
2002
2003 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2004 assert (is_full_gc_in_progress(), "should be");
2005 _full_gc_move_in_progress.set_cond(in_progress);
2006 }
2007
2008 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2009 set_gc_state(UPDATEREFS, in_progress);
2010 }
2011
2012 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2036 if (r->is_active()) {
2037 if (r->is_pinned()) {
2038 if (r->pin_count() == 0) {
2039 r->make_unpinned();
2040 }
2041 } else {
2042 if (r->pin_count() > 0) {
2043 r->make_pinned();
2044 }
2045 }
2046 }
2047 }
2048
2049 assert_pinned_region_status();
2050 }
2051
2052 #ifdef ASSERT
2053 void ShenandoahHeap::assert_pinned_region_status() {
2054 for (size_t i = 0; i < num_regions(); i++) {
2055 ShenandoahHeapRegion* r = get_region(i);
2056 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2057 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2058 }
2059 }
2060 #endif
2061
2062 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2063 return _gc_timer;
2064 }
2065
2066 void ShenandoahHeap::prepare_concurrent_roots() {
2067 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2068 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2069 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2070 set_concurrent_weak_root_in_progress(true);
2071 if (unload_classes()) {
2072 _unloader.prepare();
2073 }
2074 }
2075
2076 void ShenandoahHeap::finish_concurrent_roots() {
2077 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2091 ParallelGCThreads, nworkers);
2092 } else {
2093 // Use ConcGCThreads outside safepoints
2094 assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
2095 ConcGCThreads, nworkers);
2096 }
2097 }
2098 #endif
2099
2100 ShenandoahVerifier* ShenandoahHeap::verifier() {
2101 guarantee(ShenandoahVerify, "Should be enabled");
2102 assert (_verifier != nullptr, "sanity");
2103 return _verifier;
2104 }
2105
2106 template<bool CONCURRENT>
2107 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2108 private:
2109 ShenandoahHeap* _heap;
2110 ShenandoahRegionIterator* _regions;
2111 public:
2112 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2113 WorkerTask("Shenandoah Update References"),
2114 _heap(ShenandoahHeap::heap()),
2115 _regions(regions) {
2116 }
2117
2118 void work(uint worker_id) {
2119 if (CONCURRENT) {
2120 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2121 ShenandoahSuspendibleThreadSetJoiner stsj;
2122 do_work<ShenandoahConcUpdateRefsClosure>();
2123 } else {
2124 ShenandoahParallelWorkerSession worker_session(worker_id);
2125 do_work<ShenandoahSTWUpdateRefsClosure>();
2126 }
2127 }
2128
2129 private:
2130 template<class T>
2131 void do_work() {
2132 T cl;
2133 ShenandoahHeapRegion* r = _regions->next();
2134 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2135 while (r != nullptr) {
2136 HeapWord* update_watermark = r->get_update_watermark();
2137 assert (update_watermark >= r->bottom(), "sanity");
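      // Only walk active, non-collection-set regions, and only up to the update watermark;
      // collection set regions are evacuated rather than updated in place.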
2138 if (r->is_active() && !r->is_cset()) {
2139 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2140 }
2141 if (ShenandoahPacing) {
2142 _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2143 }
2144 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2145 return;
2146 }
2147 r = _regions->next();
2148 }
2149 }
2150 };
2151
2152 void ShenandoahHeap::update_heap_references(bool concurrent) {
2153 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2154
2155 if (concurrent) {
2156 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2157 workers()->run_task(&task);
2158 } else {
2159 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2160 workers()->run_task(&task);
2161 }
2162 }
2163
2164
2165 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2166 private:
2167 ShenandoahHeapLock* const _lock;
2168
2169 public:
2170 ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2171
2172 void heap_region_do(ShenandoahHeapRegion* r) {
2173     // Drop the unnecessary "pinned" state from regions that do not have CP marks
2174     // anymore; this allows trashing them.
2175
2176 if (r->is_active()) {
2177 if (r->is_pinned()) {
2178 if (r->pin_count() == 0) {
2179 ShenandoahHeapLocker locker(_lock);
2180 r->make_unpinned();
2181 }
2182 } else {
2183 if (r->pin_count() > 0) {
2184 ShenandoahHeapLocker locker(_lock);
2185 r->make_pinned();
2186 }
2187 }
2188 }
2189 }
2190
2191 bool is_thread_safe() { return true; }
2192 };
2193
2194 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2195 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2196 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2197
2198 {
2199 ShenandoahGCPhase phase(concurrent ?
2200 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2201 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2202 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2203 parallel_heap_region_iterate(&cl);
2204
2205 assert_pinned_region_status();
2206 }
2207
2208 {
2209 ShenandoahGCPhase phase(concurrent ?
2210 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2211 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2212 trash_cset_regions();
2213 }
2214 }
2215
2216 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2217 {
2218 ShenandoahGCPhase phase(concurrent ?
2219 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2220 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2221 ShenandoahHeapLocker locker(lock());
2222 _free_set->rebuild();
2223 }
2224 }
2225
2226 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2227 print_on(st);
2228 st->cr();
2229 print_heap_regions_on(st);
2230 }
2231
2232 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2233 size_t slice = r->index() / _bitmap_regions_per_slice;
2234
2235 size_t regions_from = _bitmap_regions_per_slice * slice;
2236 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2237 for (size_t g = regions_from; g < regions_to; g++) {
2238 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2239 if (skip_self && g == r->index()) continue;
2240 if (get_region(g)->is_committed()) {
2241 return true;
2242 }
2325 void ShenandoahHeap::initialize_serviceability() {
2326 _memory_pool = new ShenandoahMemoryPool(this);
2327 _cycle_memory_manager.add_pool(_memory_pool);
2328 _stw_memory_manager.add_pool(_memory_pool);
2329 }
2330
2331 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2332 GrowableArray<GCMemoryManager*> memory_managers(2);
2333 memory_managers.append(&_cycle_memory_manager);
2334 memory_managers.append(&_stw_memory_manager);
2335 return memory_managers;
2336 }
2337
2338 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2339 GrowableArray<MemoryPool*> memory_pools(1);
2340 memory_pools.append(_memory_pool);
2341 return memory_pools;
2342 }
2343
2344 MemoryUsage ShenandoahHeap::memory_usage() {
2345 return _memory_pool->get_memory_usage();
2346 }
2347
2348 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2349 _heap(ShenandoahHeap::heap()),
2350 _index(0) {}
2351
2352 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2353 _heap(heap),
2354 _index(0) {}
2355
2356 void ShenandoahRegionIterator::reset() {
2357 _index = 0;
2358 }
2359
2360 bool ShenandoahRegionIterator::has_next() const {
2361 return _index < _heap->num_regions();
2362 }
2363
2364 char ShenandoahHeap::gc_state() const {
2365 return _gc_state.raw_value();
2366 }
2367
2368 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2369 #ifdef ASSERT
2370 assert(_liveness_cache != nullptr, "sanity");
2371 assert(worker_id < _max_workers, "sanity");
2372 for (uint i = 0; i < num_regions(); i++) {
2373 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2374 }
2375 #endif
2376 return _liveness_cache[worker_id];
2377 }
2378
2379 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2380 assert(worker_id < _max_workers, "sanity");
2381 assert(_liveness_cache != nullptr, "sanity");
2382 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2383 for (uint i = 0; i < num_regions(); i++) {
2384 ShenandoahLiveData live = ld[i];
2385 if (live > 0) {
2386 ShenandoahHeapRegion* r = get_region(i);
2387 r->increase_live_data_gc_words(live);
2388 ld[i] = 0;
2389 }
2390 }
2391 }
2392
2393 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2394 if (is_idle()) return false;
2395
2396   // Objects allocated after marking start are implicitly alive and don't need any barriers
2397   // during the marking phase.
2398 if (is_concurrent_mark_in_progress() &&
2399 !marking_context()->allocated_after_mark_start(obj)) {
2400 return true;
2401 }
2402
2403 // Can not guarantee obj is deeply good.
2404 if (has_forwarded_objects()) {
2405 return true;
2406 }
2407
2408 return false;
2409 }
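// Informal summary of the decision above (descriptive note): an idle heap needs no barriers;
// during concurrent marking, objects allocated before mark start still need barriers; whenever
// forwarded objects may exist (evacuation/update-refs), barriers are needed because the object
// contents cannot be trusted to be deeply fixed up; otherwise no barriers are required.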
|
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "memory/allocation.hpp"
29 #include "memory/universe.hpp"
30
31 #include "gc/shared/classUnloadingContext.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/locationPrinter.inline.hpp"
36 #include "gc/shared/memAllocator.hpp"
37 #include "gc/shared/plab.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39
40 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
42 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
43 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
44 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
45 #include "gc/shenandoah/shenandoahCardTable.hpp"
46 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
47 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
48 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
49 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
50 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
51 #include "gc/shenandoah/shenandoahControlThread.hpp"
52 #include "gc/shenandoah/shenandoahFreeSet.hpp"
53 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
54 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
55 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
56 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
57 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
58 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
59 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
60 #include "gc/shenandoah/shenandoahInitLogger.hpp"
61 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
62 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
63 #include "gc/shenandoah/shenandoahMetrics.hpp"
64 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
65 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
66 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
67 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
68 #include "gc/shenandoah/shenandoahPadding.hpp"
69 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
70 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
71 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
72 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
73 #include "gc/shenandoah/shenandoahSTWMark.hpp"
74 #include "gc/shenandoah/shenandoahUtils.hpp"
75 #include "gc/shenandoah/shenandoahVerifier.hpp"
76 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
77 #include "gc/shenandoah/shenandoahVMOperations.hpp"
78 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
79 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
80 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
81 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
82 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
83 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
84 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
85 #include "utilities/globalDefinitions.hpp"
86
87 #if INCLUDE_JFR
88 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
89 #endif
90
91 #include "classfile/systemDictionary.hpp"
92 #include "code/codeCache.hpp"
93 #include "memory/classLoaderMetaspace.hpp"
94 #include "memory/metaspaceUtils.hpp"
95 #include "nmt/mallocTracker.hpp"
96 #include "nmt/memTracker.hpp"
97 #include "oops/compressedOops.inline.hpp"
98 #include "prims/jvmtiTagMap.hpp"
99 #include "runtime/atomic.hpp"
100 #include "runtime/globals.hpp"
101 #include "runtime/interfaceSupport.inline.hpp"
102 #include "runtime/java.hpp"
103 #include "runtime/orderAccess.hpp"
104 #include "runtime/safepointMechanism.hpp"
105 #include "runtime/stackWatermarkSet.hpp"
106 #include "runtime/vmThread.hpp"
159 jint ShenandoahHeap::initialize() {
160 //
161 // Figure out heap sizing
162 //
163
164 size_t init_byte_size = InitialHeapSize;
165 size_t min_byte_size = MinHeapSize;
166 size_t max_byte_size = MaxHeapSize;
167 size_t heap_alignment = HeapAlignment;
168
169 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
170
171 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
172 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
173
174 _num_regions = ShenandoahHeapRegion::region_count();
175 assert(_num_regions == (max_byte_size / reg_size_bytes),
176 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
177 _num_regions, max_byte_size, reg_size_bytes);
178
179 size_t num_committed_regions = init_byte_size / reg_size_bytes;
180 num_committed_regions = MIN2(num_committed_regions, _num_regions);
181 assert(num_committed_regions <= _num_regions, "sanity");
182 _initial_size = num_committed_regions * reg_size_bytes;
183
184 size_t num_min_regions = min_byte_size / reg_size_bytes;
185 num_min_regions = MIN2(num_min_regions, _num_regions);
186 assert(num_min_regions <= _num_regions, "sanity");
187 _minimum_size = num_min_regions * reg_size_bytes;
188
189 // Default to max heap size.
190 _soft_max_size = _num_regions * reg_size_bytes;
191
192 _committed = _initial_size;
193
194 // Now we know the number of regions and heap sizes, initialize the heuristics.
195 initialize_heuristics_generations();
196
197 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
198 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
199 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
200
201 //
202 // Reserve and commit memory for heap
203 //
204
205 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
206 initialize_reserved_region(heap_rs);
207 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
208 _heap_region_special = heap_rs.special();
209
210 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
211 "Misaligned heap: " PTR_FORMAT, p2i(base()));
212 os::trace_page_sizes_for_requested_size("Heap",
213 max_byte_size, heap_alignment,
214 heap_rs.base(),
215 heap_rs.size(), heap_rs.page_size());
216
217 #if SHENANDOAH_OPTIMIZED_MARKTASK
218 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
219 // Fail if we ever attempt to address more than we can.
220 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
221 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
222 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
223 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
224 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
225 vm_exit_during_initialization("Fatal Error", buf);
226 }
227 #endif
228
229 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
230 if (!_heap_region_special) {
231 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
232 "Cannot commit heap memory");
233 }
234
235 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
236
237 //
238 // After reserving the Java heap, create the card table, barriers, and workers, in dependency order
239 //
240 if (mode()->is_generational()) {
241 ShenandoahDirectCardMarkRememberedSet *rs;
242 ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
243 size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize);
244 rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count);
245 _card_scan = new ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
246
247 // Age census structure
248 _age_census = new ShenandoahAgeCensus();
249 }
250
251 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
252 if (_workers == nullptr) {
253 vm_exit_during_initialization("Failed necessary allocation.");
254 } else {
255 _workers->initialize_workers();
256 }
257
258 if (ParallelGCThreads > 1) {
259 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
260 _safepoint_workers->initialize_workers();
261 }
262
263 //
264 // Reserve and commit memory for bitmap(s)
265 //
266
267 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
268 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
269
270 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
271
272 guarantee(bitmap_bytes_per_region != 0,
273 "Bitmap bytes per region should not be zero");
274 guarantee(is_power_of_2(bitmap_bytes_per_region),
275 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
276
277 if (bitmap_page_size > bitmap_bytes_per_region) {
278 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
279 _bitmap_bytes_per_slice = bitmap_page_size;
280 } else {
281 _bitmap_regions_per_slice = 1;
282 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
283 }
284
285 guarantee(_bitmap_regions_per_slice >= 1,
286 "Should have at least one region per slice: " SIZE_FORMAT,
287 _bitmap_regions_per_slice);
288
289 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
290 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
291 _bitmap_bytes_per_slice, bitmap_page_size);
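// Illustrative example (hypothetical numbers, not computed here): with 32 MB regions and a mark
// bitmap of one bit per heap word, bitmap_bytes_per_region is 512 KB. With 2 MB large pages the
// page exceeds the per-region bitmap, so one slice covers 2 MB / 512 KB = 4 regions and
// _bitmap_bytes_per_slice is 2 MB; with 4 KB base pages, each region is its own 512 KB slice.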
292
293 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
294 os::trace_page_sizes_for_requested_size("Mark Bitmap",
295 bitmap_size_orig, bitmap_page_size,
296 bitmap.base(),
297 bitmap.size(), bitmap.page_size());
298 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
299 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
300 _bitmap_region_special = bitmap.special();
301
302 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
303 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
304 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
305 if (!_bitmap_region_special) {
306 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
307 "Cannot commit bitmap memory");
308 }
309
310 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
311
312 if (ShenandoahVerify) {
313 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
314 os::trace_page_sizes_for_requested_size("Verify Bitmap",
315 bitmap_size_orig, bitmap_page_size,
316 verify_bitmap.base(),
317 verify_bitmap.size(), verify_bitmap.page_size());
318 if (!verify_bitmap.special()) {
319 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
320 "Cannot commit verification bitmap memory");
321 }
322 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
323 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
324 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
325 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
326 }
327
328 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
329 size_t aux_bitmap_page_size = bitmap_page_size;
330 #ifdef LINUX
381 assert(is_aligned(req_addr, cset_align), "Should be aligned");
382 cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
383 if (cset_rs.is_reserved()) {
384 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
385 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
386 break;
387 }
388 }
389
390 if (_collection_set == nullptr) {
391 cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
392 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
393 }
394 os::trace_page_sizes_for_requested_size("Collection Set",
395 cset_size, cset_page_size,
396 cset_rs.base(),
397 cset_rs.size(), cset_rs.page_size());
398 }
399
400 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
401 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
402 _free_set = new ShenandoahFreeSet(this, _num_regions);
403
404 {
405 ShenandoahHeapLocker locker(lock());
406
407
408 for (size_t i = 0; i < _num_regions; i++) {
409 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
410 bool is_committed = i < num_committed_regions;
411 void* loc = region_storage.base() + i * region_align;
412
413 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
414 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
415
416 _marking_context->initialize_top_at_mark_start(r);
417 _regions[i] = r;
418 assert(!collection_set()->is_in(i), "New region should not be in collection set");
419
420 _affiliations[i] = ShenandoahAffiliation::FREE;
421 }
422
423 // Initialize to complete
424 _marking_context->mark_complete();
425 size_t young_cset_regions, old_cset_regions;
426
427     // We are initializing the free set; cset region tallies are ignored here.
428 size_t first_old, last_old, num_old;
429 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
430 _free_set->rebuild(young_cset_regions, old_cset_regions);
431 }
432
433 if (AlwaysPreTouch) {
434     // For NUMA, it is important to pre-touch the storage under the bitmaps with worker threads,
435     // before initialize() below zeroes it with the initializing thread. For any given region,
436     // we touch the region and the corresponding bitmaps from the same thread.
437 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
438
439 _pretouch_heap_page_size = heap_page_size;
440 _pretouch_bitmap_page_size = bitmap_page_size;
441
442 #ifdef LINUX
443 // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
444     // pages. But the kernel needs to know that every small page is used, in order to coalesce
445     // them into a huge one. Therefore, we need to pretouch with the smaller pages.
446 if (UseTransparentHugePages) {
447 _pretouch_heap_page_size = (size_t)os::vm_page_size();
448 _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
449 }
450 #endif
469 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
470 }
471
472 // There should probably be Shenandoah-specific options for these,
473 // just as there are G1-specific options.
474 {
475 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
476 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
477 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
478 }
479
480 _monitoring_support = new ShenandoahMonitoringSupport(this);
481 _phase_timings = new ShenandoahPhaseTimings(max_workers());
482 ShenandoahCodeRoots::initialize();
483
484 if (ShenandoahPacing) {
485 _pacer = new ShenandoahPacer(this);
486 _pacer->setup_for_idle();
487 }
488
489 initialize_controller();
490
491 print_init_logger();
492
493 return JNI_OK;
494 }
495
496 void ShenandoahHeap::initialize_controller() {
497 _control_thread = new ShenandoahControlThread();
498 }
499
500 void ShenandoahHeap::print_init_logger() const {
501 ShenandoahInitLogger::print();
502 }
503
504 void ShenandoahHeap::initialize_heuristics_generations() {
505 if (ShenandoahGCMode != nullptr) {
506 if (strcmp(ShenandoahGCMode, "satb") == 0) {
507 _gc_mode = new ShenandoahSATBMode();
508 } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
509 _gc_mode = new ShenandoahIUMode();
510 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
511 _gc_mode = new ShenandoahPassiveMode();
512 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
513 _gc_mode = new ShenandoahGenerationalMode();
514 } else {
515 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
516 }
517 } else {
518 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
519 }
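// For illustration, the mode is chosen on the command line, e.g. -XX:ShenandoahGCMode=satb or
// -XX:ShenandoahGCMode=generational; unknown or missing values exit during initialization above.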
520 _gc_mode->initialize_flags();
521 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
522 vm_exit_during_initialization(
523 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
524 _gc_mode->name()));
525 }
526 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
527 vm_exit_during_initialization(
528 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
529 _gc_mode->name()));
530 }
531
532 // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
533 // for old would be total heap - minimum capacity of young. This means the sum of the maximum
534 // allowed for old and young could exceed the total heap size. It remains the case that the
535 // _actual_ capacity of young + old = total.
536 _generation_sizer.heap_size_changed(max_capacity());
537 size_t initial_capacity_young = _generation_sizer.max_young_size();
538 size_t max_capacity_young = _generation_sizer.max_young_size();
539 size_t initial_capacity_old = max_capacity() - max_capacity_young;
540 size_t max_capacity_old = max_capacity() - initial_capacity_young;
541
542 _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_young, initial_capacity_young);
543 _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, initial_capacity_old);
544 _global_generation = new ShenandoahGlobalGeneration(_gc_mode->is_generational(), _max_workers, max_capacity(), max_capacity());
545 _global_generation->initialize_heuristics(_gc_mode);
546 if (mode()->is_generational()) {
547 _young_generation->initialize_heuristics(_gc_mode);
548 _old_generation->initialize_heuristics(_gc_mode);
549 }
550 _evac_tracker = new ShenandoahEvacuationTracker(mode()->is_generational());
551 }
552
553 #ifdef _MSC_VER
554 #pragma warning( push )
555 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
556 #endif
557
558 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
559 CollectedHeap(),
560 _gc_generation(nullptr),
561 _initial_size(0),
562 _committed(0),
563 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
564 _workers(nullptr),
565 _safepoint_workers(nullptr),
566 _heap_region_special(false),
567 _num_regions(0),
568 _regions(nullptr),
569 _affiliations(nullptr),
570 _update_refs_iterator(this),
571 _gc_state_changed(false),
572 _gc_no_progress_count(0),
573 _age_census(nullptr),
574 _cancel_requested_time(0),
575 _young_generation(nullptr),
576 _global_generation(nullptr),
577 _old_generation(nullptr),
578 _control_thread(nullptr),
579 _shenandoah_policy(policy),
580 _free_set(nullptr),
581 _pacer(nullptr),
582 _verifier(nullptr),
583 _phase_timings(nullptr),
584 _evac_tracker(nullptr),
585 _mmu_tracker(),
586 _generation_sizer(),
587 _monitoring_support(nullptr),
588 _memory_pool(nullptr),
589 _stw_memory_manager("Shenandoah Pauses"),
590 _cycle_memory_manager("Shenandoah Cycles"),
591 _gc_timer(new ConcurrentGCTimer()),
592 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
593 _marking_context(nullptr),
594 _bitmap_size(0),
595 _bitmap_regions_per_slice(0),
596 _bitmap_bytes_per_slice(0),
597 _bitmap_region_special(false),
598 _aux_bitmap_region_special(false),
599 _liveness_cache(nullptr),
600 _collection_set(nullptr),
601 _card_scan(nullptr)
602 {
603 }
604
605 #ifdef _MSC_VER
606 #pragma warning( pop )
607 #endif
608
609 void ShenandoahHeap::print_on(outputStream* st) const {
610 st->print_cr("Shenandoah Heap");
611 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
612 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
613 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
614 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
615 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
616 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
617 num_regions(),
618 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
619 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
620
621 st->print("Status: ");
622 if (has_forwarded_objects()) st->print("has forwarded objects, ");
623 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
624 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
625 if (is_evacuation_in_progress()) st->print("evacuating, ");
626 if (is_update_refs_in_progress()) st->print("updating refs, ");
627 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
628 if (is_full_gc_in_progress()) st->print("full gc, ");
629 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
630 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
631 if (is_concurrent_strong_root_in_progress() &&
632 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
633
634 if (cancelled_gc()) {
635 st->print("cancelled");
636 } else {
637 st->print("not cancelled");
638 }
639 st->cr();
640
641 st->print_cr("Reserved region:");
642 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
643 p2i(reserved_region().start()),
644 p2i(reserved_region().end()));
655 st->cr();
656 MetaspaceUtils::print_on(st);
657
658 if (Verbose) {
659 st->cr();
660 print_heap_regions_on(st);
661 }
662 }
663
664 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
665 public:
666 void do_thread(Thread* thread) {
667 assert(thread != nullptr, "Sanity");
668 assert(thread->is_Worker_thread(), "Only worker thread expected");
669 ShenandoahThreadLocalData::initialize_gclab(thread);
670 }
671 };
672
673 void ShenandoahHeap::post_initialize() {
674 CollectedHeap::post_initialize();
675 _mmu_tracker.initialize();
676
677 MutexLocker ml(Threads_lock);
678
679 ShenandoahInitWorkerGCLABClosure init_gclabs;
680 _workers->threads_do(&init_gclabs);
681
682   // The GCLAB cannot be initialized early during VM startup, as it cannot determine its max_size yet.
683   // Instead, let WorkerThreads initialize the GCLAB when a new worker is created.
684 _workers->set_initialize_gclab();
685 if (_safepoint_workers != nullptr) {
686 _safepoint_workers->threads_do(&init_gclabs);
687 _safepoint_workers->set_initialize_gclab();
688 }
689
690 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
691 }
692
693 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
694 return _global_generation->heuristics();
695 }
696
697 size_t ShenandoahHeap::used() const {
698 return global_generation()->used();
699 }
700
701 size_t ShenandoahHeap::committed() const {
702 return Atomic::load(&_committed);
703 }
704
705 void ShenandoahHeap::increase_committed(size_t bytes) {
706 shenandoah_assert_heaplocked_or_safepoint();
707 _committed += bytes;
708 }
709
710 void ShenandoahHeap::decrease_committed(size_t bytes) {
711 shenandoah_assert_heaplocked_or_safepoint();
712 _committed -= bytes;
713 }
714
715 // For tracking usage based on allocations, it should be the case that:
716 // * The sum of regions::used == heap::used
717 // * The sum of a generation's regions::used == generation::used
718 // * The sum of a generation's humongous regions::free == generation::humongous_waste
719 // These invariants are checked by the verifier on GC safepoints.
720 //
721 // Additional notes:
722 // * When a mutator's allocation request causes a region to be retired, the
723 // free memory left in that region is considered waste. It does not contribute
724 // to the usage, but it _does_ contribute to allocation rate.
725 // * The bottom of a PLAB must be aligned on card size. In some cases this will
726 // require padding in front of the PLAB (a filler object). Because this padding
727 // is included in the region's used memory we include the padding in the usage
728 // accounting as waste.
729 // * Mutator allocations are used to compute an allocation rate. They are also
730 // sent to the Pacer for those purposes.
731 // * There are three sources of waste:
732 // 1. The padding used to align a PLAB on card size
733 // 2. Region's free is less than minimum TLAB size and is retired
734 // 3. The unused portion of memory in the last region of a humongous object
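//
// Illustrative (hypothetical) example: a mutator TLAB request that retires a region with 8 KB left
// and is then satisfied by a 256 KB TLAB adds 256 KB to the generation's used, adds 256 KB + 8 KB
// to its allocation counter (and to the pacer), and treats the 8 KB as waste rather than usage.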
735 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
736 size_t actual_bytes = req.actual_size() * HeapWordSize;
737 size_t wasted_bytes = req.waste() * HeapWordSize;
738 ShenandoahGeneration* generation = generation_for(req.affiliation());
739
740 if (req.is_gc_alloc()) {
741 assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
742 increase_used(generation, actual_bytes + wasted_bytes);
743 } else {
744 assert(req.is_mutator_alloc(), "Expected mutator alloc here");
745 // padding and actual size both count towards allocation counter
746 generation->increase_allocated(actual_bytes + wasted_bytes);
747
748 // only actual size counts toward usage for mutator allocations
749 increase_used(generation, actual_bytes);
750
751 // notify pacer of both actual size and waste
752 notify_mutator_alloc_words(req.actual_size(), req.waste());
753
754 if (wasted_bytes > 0 && req.actual_size() > ShenandoahHeapRegion::humongous_threshold_words()) {
755       increase_humongous_waste(generation, wasted_bytes);
756 }
757 }
758 }
759
760 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
761 generation->increase_humongous_waste(bytes);
762 if (!generation->is_global()) {
763 global_generation()->increase_humongous_waste(bytes);
764 }
765 }
766
767 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
768 generation->decrease_humongous_waste(bytes);
769 if (!generation->is_global()) {
770 global_generation()->decrease_humongous_waste(bytes);
771 }
772 }
773
774 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
775 generation->increase_used(bytes);
776 if (!generation->is_global()) {
777 global_generation()->increase_used(bytes);
778 }
779 }
780
781 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
782 generation->decrease_used(bytes);
783 if (!generation->is_global()) {
784 global_generation()->decrease_used(bytes);
785 }
786 }
787
788 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
789 if (ShenandoahPacing) {
790 control_thread()->pacing_notify_alloc(words);
791 if (waste > 0) {
792 pacer()->claim_for_alloc(waste, true);
793 }
794 }
795 }
796
797 size_t ShenandoahHeap::capacity() const {
798 return committed();
799 }
800
801 size_t ShenandoahHeap::max_capacity() const {
802 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
803 }
804
805 size_t ShenandoahHeap::soft_max_capacity() const {
806 size_t v = Atomic::load(&_soft_max_size);
807 assert(min_capacity() <= v && v <= max_capacity(),
808 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
809 min_capacity(), v, max_capacity());
810 return v;
811 }
812
813 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
814 assert(min_capacity() <= v && v <= max_capacity(),
815 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
816 min_capacity(), v, max_capacity());
817 Atomic::store(&_soft_max_size, v);
818 }
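// Note (assumption, for context): the soft max is typically adjusted at runtime from the manageable
// SoftMaxHeapSize flag; the clamping logic further below bounds any new value into
// [min_capacity(), max_capacity()] before storing it.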
819
820 size_t ShenandoahHeap::min_capacity() const {
821 return _minimum_size;
822 }
823
824 size_t ShenandoahHeap::initial_capacity() const {
825 return _initial_size;
826 }
827
828 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
829 assert (ShenandoahUncommit, "should be enabled");
830
831 // Determine if there is work to do. This avoids taking heap lock if there is
832 // no work available, avoids spamming logs with superfluous logging messages,
833 // and minimises the amount of work while locks are taken.
834
835 if (committed() <= shrink_until) return;
836
837 bool has_work = false;
838 for (size_t i = 0; i < num_regions(); i++) {
839 ShenandoahHeapRegion* r = get_region(i);
840 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
841 has_work = true;
842 break;
843 }
844 }
845
846 if (has_work) {
847 static const char* msg = "Concurrent uncommit";
887 size_t old_soft_max = soft_max_capacity();
888 if (new_soft_max != old_soft_max) {
889 new_soft_max = MAX2(min_capacity(), new_soft_max);
890 new_soft_max = MIN2(max_capacity(), new_soft_max);
891 if (new_soft_max != old_soft_max) {
892 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
893 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
894 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
895 );
896 set_soft_max_capacity(new_soft_max);
897 return true;
898 }
899 }
900 return false;
901 }
902
903 void ShenandoahHeap::notify_heap_changed() {
904   // Update monitoring counters when we take a new region. This amortizes the
905   // update costs on the slow path.
906 monitoring_support()->notify_heap_changed();
907 _heap_changed.set();
908 }
909
910 void ShenandoahHeap::set_forced_counters_update(bool value) {
911 monitoring_support()->set_forced_counters_update(value);
912 }
913
914 void ShenandoahHeap::handle_force_counters_update() {
915 monitoring_support()->handle_force_counters_update();
916 }
917
918 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
919 // New object should fit the GCLAB size
920 size_t min_size = MAX2(size, PLAB::min_size());
921
922 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
923 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
924
925   // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size. This enables a more equitable distribution of
926   // the available evacuation budget between the many threads that are coordinating in the evacuation effort.
927 if (ShenandoahMaxEvacLABRatio > 0) {
928 log_debug(gc, free)("Allocate new gclab: " SIZE_FORMAT ", " SIZE_FORMAT, new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
929 new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
930 }
931
932 new_size = MIN2(new_size, PLAB::max_size());
933 new_size = MAX2(new_size, PLAB::min_size());
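// Illustrative (hypothetical) sizing walk-through: if the previous GCLAB was 64K words, the first
// guess is 128K words; with ShenandoahMaxEvacLABRatio > 0 it is further capped at
// ShenandoahMaxEvacLABRatio * PLAB::min_size(), and finally clamped into
// [PLAB::min_size(), PLAB::max_size()] before being compared against the object size below.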
934
935 // Record new heuristic value even if we take any shortcut. This captures
936 // the case when moderately-sized objects always take a shortcut. At some point,
937 // heuristics should catch up with them.
938 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
939
940 if (new_size < size) {
941 // New size still does not fit the object. Fall back to shared allocation.
942 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
943 log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
944 return nullptr;
945 }
946
947 // Retire current GCLAB, and allocate a new one.
948 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
949 gclab->retire();
950
951 size_t actual_size = 0;
952 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
953 if (gclab_buf == nullptr) {
954 return nullptr;
955 }
956
957 assert (size <= actual_size, "allocation should fit");
958
959 // ...and clear or zap just allocated TLAB, if needed.
960 if (ZeroTLAB) {
961 Copy::zero_to_words(gclab_buf, actual_size);
962 } else if (ZapTLAB) {
963 // Skip mangling the space corresponding to the object header to
964 // ensure that the returned space is not considered parsable by
965 // any concurrent GC thread.
966 size_t hdr_size = oopDesc::header_size();
967 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
968 }
969 gclab->set_buf(gclab_buf, actual_size);
970 return gclab->allocate(size);
971 }
972
973 void ShenandoahHeap::cancel_old_gc() {
974 shenandoah_assert_safepoint();
975 assert(old_generation() != nullptr, "Should only have mixed collections in generation mode.");
976 if (old_generation()->is_idle()) {
977 #ifdef ASSERT
978 old_generation()->validate_waiting_for_bootstrap();
979 #endif
980 } else {
981 log_info(gc)("Terminating old gc cycle.");
982 // Stop marking
983 old_generation()->cancel_marking();
984 // Stop tracking old regions
985 old_generation()->abandon_collection_candidates();
986 // Remove old generation access to young generation mark queues
987 young_generation()->set_old_gen_task_queues(nullptr);
988 // Transition to IDLE now.
989 old_generation()->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
990 }
991 }
992
993 // Called from stubs in JIT code or interpreter
994 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
995 size_t requested_size,
996 size_t* actual_size) {
997 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
998 HeapWord* res = allocate_memory(req);
999 if (res != nullptr) {
1000 *actual_size = req.actual_size();
1001 } else {
1002 *actual_size = 0;
1003 }
1004 return res;
1005 }
1006
1007 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
1008 size_t word_size,
1009 size_t* actual_size) {
1010 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
1011 HeapWord* res = allocate_memory(req);
1012 if (res != nullptr) {
1013 *actual_size = req.actual_size();
1014 } else {
1015 *actual_size = 0;
1016 }
1017 return res;
1018 }
1019
1020
1021 // is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
1022 // to old-gen. PLAB allocations are not known to be promotions, since they may also hold old-gen evacuations.
1023 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
1024 intptr_t pacer_epoch = 0;
1025 bool in_new_region = false;
1026 HeapWord* result = nullptr;
1027
1028 if (req.is_mutator_alloc()) {
1029 if (ShenandoahPacing) {
1030 pacer()->pace_for_alloc(req.size());
1031 pacer_epoch = pacer()->epoch();
1032 }
1033
1034 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
1035 result = allocate_memory_under_lock(req, in_new_region);
1036 }
1037
1038 // Check that gc overhead is not exceeded.
1039 //
1040 // Shenandoah will grind along for quite a while allocating one
1041 // object at a time using shared (non-tlab) allocations. This check
1042 // is testing that the GC overhead limit has not been exceeded.
1043 // This will notify the collector to start a cycle, but will raise
1044 // an OOME to the mutator if the last Full GCs have not made progress.
1045 if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
1046 control_thread()->handle_alloc_failure(req, false);
1047 return nullptr;
1048 }
1049
1050 // Block until control thread reacted, then retry allocation.
1051 //
1052 // It might happen that one of the threads requesting allocation would unblock
1053 // way later after GC happened, only to fail the second allocation, because
1054 // other threads have already depleted the free storage. In this case, a better
1055 // strategy is to try again, as long as GC makes progress (or until at least
1056 // one full GC has completed).
1057 size_t original_count = shenandoah_policy()->full_gc_count();
1058 while (result == nullptr
1059 && (get_gc_no_progress_count() == 0 || original_count == shenandoah_policy()->full_gc_count())) {
1060 control_thread()->handle_alloc_failure(req, true);
1061 result = allocate_memory_under_lock(req, in_new_region);
1062 }
1063
1064 if (log_is_enabled(Debug, gc, alloc)) {
1065 ResourceMark rm;
1066 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1067 Thread::current()->name(), p2i(result), req.type_string(), req.size(), original_count, get_gc_no_progress_count());
1068 }
1069
1070 } else {
1071 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1072 result = allocate_memory_under_lock(req, in_new_region);
1073 // Do not call handle_alloc_failure() here, because we cannot block.
1074 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1075 }
1076
1077 if (in_new_region) {
1078 notify_heap_changed();
1079 }
1080
1081 if (result == nullptr) {
1082 req.set_actual_size(0);
1083 }
1084
1085 // This is called regardless of the outcome of the allocation to account
1086 // for any waste created by retiring regions with this request.
1087 increase_used(req);
1088
1089 if (result != nullptr) {
1090 size_t requested = req.size();
1091 size_t actual = req.actual_size();
1092
1093 assert (req.is_lab_alloc() || (requested == actual),
1094 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1095 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1096
1097 if (req.is_mutator_alloc()) {
1098 // If we requested more than we were granted, give the rest back to pacer.
1099 // This only matters if we are in the same pacing epoch: do not try to unpace
1100 // over the budget for the other phase.
1101 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1102 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1103 }
1104 }
1105 }
1106
1107 return result;
1108 }
1109
1110 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1111 bool try_smaller_lab_size = false;
1112 size_t smaller_lab_size;
1113 {
1114 // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions.
1115 bool promotion_eligible = false;
1116 bool allow_allocation = true;
1117 bool plab_alloc = false;
1118 size_t requested_bytes = req.size() * HeapWordSize;
1119 HeapWord* result = nullptr;
1120
1121 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1122 // We cannot block for safepoint for GC allocations, because there is a high chance
1123 // we are already running at safepoint or from stack watermark machinery, and we cannot
1124 // block again.
1125 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1126 Thread* thread = Thread::current();
1127
1128 if (mode()->is_generational()) {
1129 if (req.affiliation() == YOUNG_GENERATION) {
1130 if (req.is_mutator_alloc()) {
1131 size_t young_words_available = young_generation()->available() / HeapWordSize;
1132 if (req.is_lab_alloc() && (req.min_size() < young_words_available)) {
1133 // Allow ourselves to try a smaller lab size even if requested_bytes <= young_available. We may need a smaller
1134 // lab size because young memory has become too fragmented.
1135 try_smaller_lab_size = true;
1136 smaller_lab_size = (young_words_available < req.size())? young_words_available: req.size();
1137 } else if (req.size() > young_words_available) {
1138 // Can't allocate because even min_size() is larger than remaining young_available
1139 log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT
1140 ", young words available: " SIZE_FORMAT, req.type_string(),
1141 HeapWordSize * (req.is_lab_alloc()? req.min_size(): req.size()), young_words_available);
1142 return nullptr;
1143 }
1144 }
1145       } else { // req.affiliation() == OLD_GENERATION
1146 assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
1147 if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1148 plab_alloc = true;
1149 size_t promotion_avail = old_generation()->get_promoted_reserve();
1150 size_t promotion_expended = old_generation()->get_promoted_expended();
1151 if (promotion_expended + requested_bytes > promotion_avail) {
1152 promotion_avail = 0;
1153 if (old_generation()->get_evacuation_reserve() == 0) {
1154             // There are no old-gen evacuations in this pass. There's no value in creating a PLAB that cannot
1155             // be used for promotions.
1156 allow_allocation = false;
1157 }
1158 } else {
1159 promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1160 promotion_eligible = true;
1161 }
1162 } else if (req.is_promotion()) {
1163 // This is a shared alloc for promotion
1164 size_t promotion_avail = old_generation()->get_promoted_reserve();
1165 size_t promotion_expended = old_generation()->get_promoted_expended();
1166 if (promotion_expended + requested_bytes > promotion_avail) {
1167 promotion_avail = 0;
1168 } else {
1169 promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1170 }
1171 if (promotion_avail == 0) {
1172 // We need to reserve the remaining memory for evacuation. Reject this allocation. The object will be
1173 // evacuated to young-gen memory and promoted during a future GC pass.
1174 return nullptr;
1175 }
1176 // Else, we'll allow the allocation to proceed. (Since we hold heap lock, the tested condition remains true.)
1177 } else {
1178 // This is a shared allocation for evacuation. Memory has already been reserved for this purpose.
1179 }
1180 }
1181 } // This ends the is_generational() block
1182
1183 // First try the original request. If TLAB request size is greater than available, allocate() will attempt to downsize
1184 // request to fit within available memory.
1185 result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
1186 if (result != nullptr) {
1187 if (req.is_old()) {
1188 ShenandoahThreadLocalData::reset_plab_promoted(thread);
1189 if (req.is_gc_alloc()) {
1190 bool disable_plab_promotions = false;
1191 if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1192 if (promotion_eligible) {
1193 size_t actual_size = req.actual_size() * HeapWordSize;
1194 // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
1195 // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
1196 if (old_generation()->get_promoted_expended() + actual_size <= old_generation()->get_promoted_reserve()) {
1197                 // Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreaching.
1198                 // When we retire this PLAB, we'll unexpend what we don't really use.
1199 ShenandoahThreadLocalData::enable_plab_promotions(thread);
1200 old_generation()->expend_promoted(actual_size);
1201 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
1202 } else {
1203 disable_plab_promotions = true;
1204 }
1205 } else {
1206 disable_plab_promotions = true;
1207 }
1208 if (disable_plab_promotions) {
1209             // Disable promotions in this thread because the entirety of this PLAB must be available to hold old-gen evacuations.
1210 ShenandoahThreadLocalData::disable_plab_promotions(thread);
1211 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1212 }
1213 } else if (req.is_promotion()) {
1214 // Shared promotion. Assume size is requested_bytes.
1215 old_generation()->expend_promoted(requested_bytes);
1216 }
1217 }
1218
1219 // Register the newly allocated object while we're holding the global lock since there's no synchronization
1220 // built in to the implementation of register_object(). There are potential races when multiple independent
1221 // threads are allocating objects, some of which might span the same card region. For example, consider
1222 // a card table's memory region within which three objects are being allocated by three different threads:
1223 //
1224 // objects being "concurrently" allocated:
1225 // [-----a------][-----b-----][--------------c------------------]
1226 // [---- card table memory range --------------]
1227 //
1228 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1229 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1230 // allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1231 // allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1232 // card region.
1233 //
1234 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1235 // last-start representing object b while first-start represents object c. This is why we need to require all
1236 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1237 card_scan()->register_object(result);
1238 }
1239 } else {
1240     // The allocation failed. If this was a PLAB allocation, we've already retired it and no longer have a PLAB.
1241 if (req.is_old() && req.is_gc_alloc() && (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
1242 // We don't need to disable PLAB promotions because there is no PLAB. We leave promotions enabled because
1243 // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
1244 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1245 }
1246 }
1247 if ((result != nullptr) || !try_smaller_lab_size) {
1248 return result;
1249 }
1250 // else, fall through to try_smaller_lab_size
1251 } // This closes the block that holds the heap lock, releasing the lock.
1252
1253 // We failed to allocate the originally requested lab size. Let's see if we can allocate a smaller lab size.
1254 if (req.size() == smaller_lab_size) {
1255 // If we were already trying to allocate min size, no value in attempting to repeat the same. End the recursion.
1256 return nullptr;
1257 }
1258
1259 // We arrive here if the tlab allocation request can be resized to fit within young_available
1260 assert((req.affiliation() == YOUNG_GENERATION) && req.is_lab_alloc() && req.is_mutator_alloc() &&
1261 (smaller_lab_size < req.size()), "Only shrink allocation request size for TLAB allocations");
1262
1263   // By convention, ShenandoahAllocRequest is primarily read-only. The only mutable instance data is represented by
1264   // actual_size(), which is overwritten with the size of the allocation when the allocation request is satisfied. We use a
1265   // recursive call here rather than introducing new methods to mutate the existing ShenandoahAllocRequest argument.
1266 // Mutation of the existing object might result in astonishing results if calling contexts assume the content of immutable
1267 // fields remain constant. The original TLAB allocation request was for memory that exceeded the current capacity. We'll
1268 // attempt to allocate a smaller TLAB. If this is successful, we'll update actual_size() of our incoming
1269 // ShenandoahAllocRequest. If the recursive request fails, we'll simply return nullptr.
1270
1271 // Note that we've relinquished the HeapLock and some other thread may perform additional allocation before our recursive
1272 // call reacquires the lock. If that happens, we will need another recursive call to further reduce the size of our request
1273 // for each time another thread allocates young memory during the brief intervals that the heap lock is available to
1274 // interfering threads. We expect this interference to be rare. The recursion bottoms out when young_available is
1275 // smaller than req.min_size(). The inner-nested call to allocate_memory_under_lock() uses the same min_size() value
1276 // as this call, but it uses a preferred size() that is smaller than our preferred size, and is no larger than what we most
1277 // recently saw as the memory currently available within the young generation.
1278
1279 // TODO: At the expense of code clarity, we could rewrite this recursive solution to use iteration. We need at most one
1280 // extra instance of the ShenandoahAllocRequest, which we can re-initialize multiple times inside a loop, with one iteration
1281 // of the loop required for each time the existing solution would recurse. An iterative solution would be more efficient
1282 // in CPU time and stack memory utilization. The expectation is that it is very rare that we would recurse more than once
1283 // so making this change is not currently seen as a high priority.
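// Illustrative (hypothetical) trace of the recursion: a mutator asks for a 512K-word TLAB while only
// 300K words of young memory remain; the outer call sets smaller_lab_size to 300K words and recurses
// with for_tlab(min_size, 300K). If another thread consumes young memory before the lock is
// reacquired, the inner call may recurse once more with a still smaller size; the recursion bottoms
// out when the remaining young memory drops below req.min_size(), or when size() equals
// smaller_lab_size.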
1284
1285 ShenandoahAllocRequest smaller_req = ShenandoahAllocRequest::for_tlab(req.min_size(), smaller_lab_size);
1286
1287 // Note that shrinking the preferred size gets us past the gatekeeper that checks whether there's available memory to
1288 // satisfy the allocation request. The reality is the actual TLAB size is likely to be even smaller, because it will
1289 // depend on how much memory is available within mutator regions that are not yet fully used.
1290 HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region);
1291 if (result != nullptr) {
1292 req.set_actual_size(smaller_req.actual_size());
1293 }
1294 return result;
1295 }
1296
1297 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1298 bool* gc_overhead_limit_was_exceeded) {
1299 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1300 return allocate_memory(req);
1301 }
1302
1303 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1304 size_t size,
1305 Metaspace::MetadataType mdtype) {
1306 MetaWord* result;
1307
1308 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1309 ShenandoahHeuristics* h = global_generation()->heuristics();
1310 if (h->can_unload_classes()) {
1311 h->record_metaspace_oom();
1312 }
1313
1314 // Expand and retry allocation
1315 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1316 if (result != nullptr) {
1317 return result;
1318 }
1319
1320 // Start full GC
1321 collect(GCCause::_metadata_GC_clear_soft_refs);
1322
1323 // Retry allocation
1324 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1325 if (result != nullptr) {
1326 return result;
1327 }
1328
1329 // Expand and retry allocation
1330 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1383 private:
1384 void do_work() {
1385 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1386 ShenandoahHeapRegion* r;
1387 while ((r = _cs->claim_next()) != nullptr) {
1388 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1389 _sh->marked_object_iterate(r, &cl);
1390
1391 if (ShenandoahPacing) {
1392 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1393 }
1394
1395 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1396 break;
1397 }
1398 }
1399 }
1400 };
1401
1402 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1403 if (mode()->is_generational()) {
1404 ShenandoahRegionIterator regions;
1405 ShenandoahGenerationalEvacuationTask task(ShenandoahGenerationalHeap::heap(), &regions, concurrent);
1406 workers()->run_task(&task);
1407 } else {
1408 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1409 workers()->run_task(&task);
1410 }
1411 }
1412
1413 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1414 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1415 if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1416 // This thread went through the OOM during evac protocol. It is safe to return
1417 // the forward pointer. It must not attempt to evacuate any other objects.
1418 return ShenandoahBarrierSet::resolve_forwarded(p);
1419 }
1420
1421 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1422
1423 ShenandoahHeapRegion* r = heap_region_containing(p);
1424 assert(!r->is_humongous(), "never evacuate humongous objects");
1425
1426 ShenandoahAffiliation target_gen = r->affiliation();
1427 return try_evacuate_object(p, thread, r, target_gen);
1428 }
1429
1430 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1431 ShenandoahAffiliation target_gen) {
1432 assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1433 assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1434 bool alloc_from_lab = true;
1435 HeapWord* copy = nullptr;
1436 size_t size = p->size();
1437
1438 #ifdef ASSERT
1439 if (ShenandoahOOMDuringEvacALot &&
1440 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1441 copy = nullptr;
1442 } else {
1443 #endif
1444 if (UseTLAB) {
1445 copy = allocate_from_gclab(thread, size);
1446 if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
1447 // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve. Try resetting
1448 // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
1449 // TODO: is this right? using PLAB::min_size() here for gc lab size?
1450 ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
1451 copy = allocate_from_gclab(thread, size);
1452 // If we still get nullptr, we'll try a shared allocation below.
1453 }
1454 }
1455
1456 if (copy == nullptr) {
1457 // If we failed to allocate in LAB, we'll try a shared allocation.
1458 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1459 copy = allocate_memory(req);
1460 alloc_from_lab = false;
1461 }
1462 #ifdef ASSERT
1463 }
1464 #endif
1465
1466 if (copy == nullptr) {
1467 control_thread()->handle_alloc_failure_evac(size);
1468
1469 _oom_evac_handler.handle_out_of_memory_during_evacuation();
1470
1471 return ShenandoahBarrierSet::resolve_forwarded(p);
1472 }
1473
1474 // Copy the object:
1475 _evac_tracker->begin_evacuation(thread, size * HeapWordSize);
1476 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1477
1478 oop copy_val = cast_to_oop(copy);
1479
1480 // Try to install the new forwarding pointer.
1481 ContinuationGCSupport::relativize_stack_chunk(copy_val);
1482
1483 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1484 if (result == copy_val) {
1485 // Successfully evacuated. Our copy is now the public one!
1486 _evac_tracker->end_evacuation(thread, size * HeapWordSize);
1487 shenandoah_assert_correct(nullptr, copy_val);
1488 return copy_val;
1489 } else {
1490 // Failed to evacuate. We need to deal with the object that is left behind. Since this
1491 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1492 // But if it happens to contain references to evacuated regions, those references would
1493 // not get updated for this stale copy during this cycle, and we will crash while scanning
1494 // it the next cycle.
1495 if (alloc_from_lab) {
1496 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1497 // object will overwrite this stale copy, or the filler object on LAB retirement will
1498 // do this.
1499 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1500 } else {
1501 // For non-LAB allocations, we have no way to retract the allocation, and
1502 // have to explicitly overwrite the copy with the filler object. With that overwrite,
1503 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1504 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1505 fill_with_object(copy, size);
1506 shenandoah_assert_correct(nullptr, copy_val);
1507 // For non-LAB allocations, the object has already been registered
1508 }
1509 shenandoah_assert_correct(nullptr, result);
1510 return result;
1511 }
1512 }
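// Condensed, illustrative restatement of the install-or-rollback protocol above (guarded out of compilation;
// the names match the method above, only the control flow is flattened for readability):
#if 0
  oop witness = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (witness == copy_val) {
    return copy_val;                                                       // we won the race; our copy is canonical
  }
  if (alloc_from_lab) {
    ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size); // bump-pointer rollback suffices
  } else {
    fill_with_object(copy, size);                                          // overwrite the stale shared copy with a filler
  }
  return witness;                                                          // another thread's copy won; use that one
#endif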
1513
1514 void ShenandoahHeap::trash_cset_regions() {
1515 ShenandoahHeapLocker locker(lock());
1516
1517 ShenandoahCollectionSet* set = collection_set();
1518 ShenandoahHeapRegion* r;
1519 set->clear_current_index();
1520 while ((r = set->next()) != nullptr) {
1521 r->make_trash();
1522 }
1523 collection_set()->clear();
1524 }
1525
1526 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1527 st->print_cr("Heap Regions:");
1528 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1529 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1530 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1531 st->print_cr("UWM=update watermark, U=used");
1532 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1533 st->print_cr("S=shared allocs, L=live data");
1534 st->print_cr("CP=critical pins");
1535
1536 for (size_t i = 0; i < num_regions(); i++) {
1537 get_region(i)->print_on(st);
1538 }
1539 }
1540
1541 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1542 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1543
1544 oop humongous_obj = cast_to_oop(start->bottom());
1545 size_t size = humongous_obj->size();
1546 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1547 size_t index = start->index() + required_regions - 1;
1548
1549 assert(!start->has_live(), "liveness must be zero");
1550
1551 for (size_t i = 0; i < required_regions; i++) {
1552 // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1553 // as it expects that every region belongs to a humongous region starting with a humongous start region.
1554 ShenandoahHeapRegion* region = get_region(index--);
1555
1556 assert(region->is_humongous(), "expect correct humongous start or continuation");
1557 assert(!region->is_cset(), "Humongous region should not be in collection set");
1558
1559 region->make_trash_immediate();
1560 }
1561 return required_regions;
1562 }
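// Worked example (illustrative; the 1 MiB region size is an assumption, not the configured value): a humongous
// object of 3.5 MiB occupies required_regions = ceil(3.5 MiB / 1 MiB) = 4 regions, so the loop above trashes the
// regions at indices start->index()+3 down to start->index(), in that tail-first order, and the method returns 4.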
1563
1564 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1565 public:
1566 ShenandoahCheckCleanGCLABClosure() {}
1567 void do_thread(Thread* thread) {
1568 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1569 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1570 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1571
1572 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1573 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1574 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1575 assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1576 }
1577 }
1578 };
1579
1580 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1581 private:
1582 bool const _resize;
1583 public:
1584 ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1585 void do_thread(Thread* thread) {
1586 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1587 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1588 gclab->retire();
1589 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1590 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1591 }
1592
1593 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1594 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1595 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1596
1597 // There are two reasons to retire all plabs between old-gen evacuation passes.
1598 // 1. We need to make the plab memory parsable by remembered-set scanning.
1599 // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1600 ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1601 if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1602 ShenandoahThreadLocalData::set_plab_size(thread, 0);
1603 }
1604 }
1605 }
1606 };
1607
1608 void ShenandoahHeap::labs_make_parsable() {
1609 assert(UseTLAB, "Only call with UseTLAB");
1610
1611 ShenandoahRetireGCLABClosure cl(false);
1612
1613 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1614 ThreadLocalAllocBuffer& tlab = t->tlab();
1615 tlab.make_parsable();
1616 cl.do_thread(t);
1617 }
1618
1619 workers()->threads_do(&cl);
1620 }
1621
1622 void ShenandoahHeap::tlabs_retire(bool resize) {
1623 assert(UseTLAB, "Only call with UseTLAB");
1624 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1644 #endif
1645 }
1646
1647 void ShenandoahHeap::gclabs_retire(bool resize) {
1648 assert(UseTLAB, "Only call with UseTLAB");
1649 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1650
1651 ShenandoahRetireGCLABClosure cl(resize);
1652 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1653 cl.do_thread(t);
1654 }
1655 workers()->threads_do(&cl);
1656
1657 if (safepoint_workers() != nullptr) {
1658 safepoint_workers()->threads_do(&cl);
1659 }
1660 }
1661
1662 // Returns size in bytes
1663 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1664 // Return the max allowed size, and let the allocation path figure out the safe size for the current allocation.
1665 return ShenandoahHeapRegion::max_tlab_size_bytes();
1666 }
1667
1668 size_t ShenandoahHeap::max_tlab_size() const {
1669 // Returns size in words
1670 return ShenandoahHeapRegion::max_tlab_size_words();
1671 }
1672
1673 void ShenandoahHeap::collect(GCCause::Cause cause) {
1674 control_thread()->request_gc(cause);
1675 }
1676
1677 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1678 //assert(false, "Shouldn't need to do full collections");
1679 }
1680
1681 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1682 ShenandoahHeapRegion* r = heap_region_containing(addr);
1683 if (r != nullptr) {
1684 return r->block_start(addr);
1685 }
1686 return nullptr;
1687 }
1688
1689 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1690 ShenandoahHeapRegion* r = heap_region_containing(addr);
1691 return r->block_is_obj(addr);
1692 }
1693
1694 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1695 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1696 }
1697
1698 void ShenandoahHeap::prepare_for_verify() {
1699 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1700 labs_make_parsable();
1701 }
1702 }
1703
1704 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1705 if (_shenandoah_policy->is_at_shutdown()) {
1706 return;
1707 }
1708
1709 if (_control_thread != nullptr) {
1710 tcl->do_thread(_control_thread);
1711 }
1712
1713 workers()->threads_do(tcl);
1714 if (_safepoint_workers != nullptr) {
1715 _safepoint_workers->threads_do(tcl);
1716 }
1717 }
1718
1719 void ShenandoahHeap::print_tracing_info() const {
1720 LogTarget(Info, gc, stats) lt;
1721 if (lt.is_enabled()) {
1722 ResourceMark rm;
1723 LogStream ls(lt);
1724
1725 phase_timings()->print_global_on(&ls);
1726
1727 ls.cr();
1728 ls.cr();
1729
1730 shenandoah_policy()->print_gc_stats(&ls);
1731
1732 ls.cr();
1733
1734 evac_tracker()->print_global_on(&ls);
1735
1736 ls.cr();
1737 ls.cr();
1738 }
1739 }
1740
1741 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1742 shenandoah_policy()->record_collection_cause(cause);
1743
1744 set_gc_cause(cause);
1745 set_gc_generation(generation);
1746
1747 generation->heuristics()->record_cycle_start();
1748 }
1749
1750 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1751 generation->heuristics()->record_cycle_end();
1752 if (mode()->is_generational() && generation->is_global()) {
1753 // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1754 young_generation()->heuristics()->record_cycle_end();
1755 old_generation()->heuristics()->record_cycle_end();
1756 }
1757 set_gc_cause(GCCause::_no_gc);
1758 }
1759
1760 void ShenandoahHeap::verify(VerifyOption vo) {
1761 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1762 if (ShenandoahVerify) {
1763 verifier()->verify_generic(vo);
1764 } else {
1765 // TODO: Consider allocating verification bitmaps on demand,
1766 // and turn this on unconditionally.
1767 }
1768 }
1769 }
1770 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1771 return _free_set->capacity();
1772 }
1773
1774 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1775 private:
1776 MarkBitMap* _bitmap;
1777 ShenandoahScanObjectStack* _oop_stack;
1778 ShenandoahHeap* const _heap;
1779 ShenandoahMarkingContext* const _marking_context;
2061 if (start >= max) break;
2062
2063 for (size_t i = cur; i < end; i++) {
2064 ShenandoahHeapRegion* current = _heap->get_region(i);
2065 _blk->heap_region_do(current);
2066 }
2067 }
2068 }
2069 };
2070
2071 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2072 assert(blk->is_thread_safe(), "Only thread-safe closures here");
2073 if (num_regions() > ShenandoahParallelRegionStride) {
2074 ShenandoahParallelHeapRegionTask task(blk);
2075 workers()->run_task(&task);
2076 } else {
2077 heap_region_iterate(blk);
2078 }
2079 }
2080
2081 class ShenandoahRendezvousClosure : public HandshakeClosure {
2082 public:
2083 inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
2084 inline void do_thread(Thread* thread) {}
2085 };
2086
2087 void ShenandoahHeap::rendezvous_threads() {
2088 ShenandoahRendezvousClosure cl;
2089 Handshake::execute(&cl);
2090 }
2091
2092 void ShenandoahHeap::recycle_trash() {
2093 free_set()->recycle_trash();
2094 }
2095
2096 void ShenandoahHeap::do_class_unloading() {
2097 _unloader.unload();
2098 if (mode()->is_generational()) {
2099 old_generation()->set_parseable(false);
2100 }
2101 }
2102
2103 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2104 // Weak refs processing
2105 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2106 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2107 ShenandoahTimingsTracker t(phase);
2108 ShenandoahGCWorkerPhase worker_phase(phase);
2109 active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2110 }
2111
2112 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2113 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2114
2115 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2116 // make them parsable for update code to work correctly. Plus, we can compute new sizes
2117 // for future GCLABs here.
2118 if (UseTLAB) {
2119 ShenandoahGCPhase phase(concurrent ?
2120 ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2121 ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2122 gclabs_retire(ResizeTLAB);
2123 }
2124
2125 _update_refs_iterator.reset();
2126 }
2127
2128 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
2129 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2130 if (_gc_state_changed) {
2131 _gc_state_changed = false;
2132 char state = gc_state();
2133 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2134 ShenandoahThreadLocalData::set_gc_state(t, state);
2135 }
2136 }
2137 }
2138
2139 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
2140 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2141 _gc_state.set_cond(mask, value);
2142 _gc_state_changed = true;
2143 }
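// Illustrative sequencing (not a new API, just a restatement of the two methods above): a phase transition
// running at a Shenandoah safepoint calls set_gc_state(MASK, value), which only updates the shared _gc_state
// and raises _gc_state_changed; propagate_gc_state_to_java_threads() then copies the packed state into each
// JavaThread's thread-local view, so mutators observe the new state once they leave the safepoint.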
2144
2145 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2146 uint mask;
2147 assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2148 if (!in_progress && is_concurrent_old_mark_in_progress()) {
2149 assert(mode()->is_generational(), "Only generational GC has old marking");
2150 assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2151 // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2152 mask = YOUNG_MARKING;
2153 } else {
2154 mask = MARKING | YOUNG_MARKING;
2155 }
2156 set_gc_state(mask, in_progress);
2157 manage_satb_barrier(in_progress);
2158 }
2159
2160 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2161 #ifdef ASSERT
2162 // has_forwarded_objects() iff UPDATEREFS or EVACUATION
2163 bool has_forwarded = has_forwarded_objects();
2164 bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
2165 bool evacuating = _gc_state.is_set(EVACUATION);
2166 assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2167 "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2168 #endif
2169 if (!in_progress && is_concurrent_young_mark_in_progress()) {
2170 // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2171 assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2172 set_gc_state(OLD_MARKING, in_progress);
2173 } else {
2174 set_gc_state(MARKING | OLD_MARKING, in_progress);
2175 }
2176 manage_satb_barrier(in_progress);
2177 }
2178
2179 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2180 return old_generation()->is_preparing_for_mark();
2181 }
2182
2183 void ShenandoahHeap::set_aging_cycle(bool in_progress) {
2184 _is_aging_cycle.set_cond(in_progress);
2185 }
2186
2187 void ShenandoahHeap::manage_satb_barrier(bool active) {
2188 if (is_concurrent_mark_in_progress()) {
2189 // Ignore request to deactivate barrier while concurrent mark is in progress.
2190 // Do not attempt to re-activate the barrier if it is already active.
2191 if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2192 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2193 }
2194 } else {
2195 // No concurrent marking is in progress so honor request to deactivate,
2196 // but only if the barrier is already active.
2197 if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2198 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2199 }
2200 }
2201 }
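// Restating the policy above as a decision table (clarifying comment only):
//   concurrent mark in progress, request=activate,   barrier inactive -> activate
//   concurrent mark in progress, request=deactivate                   -> ignored (never drop the barrier while marking)
//   no concurrent mark,          request=deactivate,  barrier active  -> deactivate
//   no concurrent mark,          request=activate                     -> ignored (this path only honors deactivation)
//   all other combinations                                            -> no-op (barrier already in the requested state)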
2202
2203 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2204 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2205 set_gc_state(EVACUATION, in_progress);
2206 }
2207
2208 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2209 if (in_progress) {
2210 _concurrent_strong_root_in_progress.set();
2211 } else {
2212 _concurrent_strong_root_in_progress.unset();
2213 }
2214 }
2215
2216 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2217 set_gc_state(WEAK_ROOTS, cond);
2218 }
2219
2220 GCTracer* ShenandoahHeap::tracer() {
2221 return shenandoah_policy()->tracer();
2222 }
2223
2224 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2225 return _free_set->used();
2226 }
2227
2228 bool ShenandoahHeap::try_cancel_gc() {
2229 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2230 return prev == CANCELLABLE;
2231 }
2232
2233 void ShenandoahHeap::cancel_concurrent_mark() {
2234 _young_generation->cancel_marking();
2235 _old_generation->cancel_marking();
2236 _global_generation->cancel_marking();
2237
2238 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2239 }
2240
2241 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2242 if (try_cancel_gc()) {
2243 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2244 log_info(gc)("%s", msg.buffer());
2245 Events::log(Thread::current(), "%s", msg.buffer());
2246 _cancel_requested_time = os::elapsedTime();
2247 }
2248 }
2249
2250 uint ShenandoahHeap::max_workers() {
2251 return _max_workers;
2252 }
2253
2254 void ShenandoahHeap::stop() {
2255 // The shutdown sequence should be able to terminate when GC is running.
2256
2257 // Step 1. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2258 _shenandoah_policy->record_shutdown();
2259
2260 // Step 2. Notify control thread that we are in shutdown.
2261 // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2262 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2263 control_thread()->prepare_for_graceful_shutdown();
2264
2265 // Step 3. Notify GC workers that we are cancelling GC.
2266 cancel_gc(GCCause::_shenandoah_stop_vm);
2267
2268 // Step 4. Wait until GC worker exits normally.
2269 control_thread()->stop();
2270 }
2271
2272 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2273 if (!unload_classes()) return;
2274 ClassUnloadingContext ctx(_workers->active_workers(),
2275 true /* unregister_nmethods_during_purge */,
2276 false /* lock_nmethod_free_separately */);
2277
2278 // Unload classes and purge SystemDictionary.
2279 {
2280 ShenandoahPhaseTimings::Phase phase = full_gc ?
2281 ShenandoahPhaseTimings::full_gc_purge_class_unload :
2282 ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2283 ShenandoahIsAliveSelector is_alive;
2284 {
2285 CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2286 ShenandoahGCPhase gc_phase(phase);
2287 ShenandoahGCWorkerPhase worker_phase(phase);
2288 bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2350 }
2351
2352 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2353 set_gc_state(HAS_FORWARDED, cond);
2354 }
2355
2356 void ShenandoahHeap::set_unload_classes(bool uc) {
2357 _unload_classes.set_cond(uc);
2358 }
2359
2360 bool ShenandoahHeap::unload_classes() const {
2361 return _unload_classes.is_set();
2362 }
2363
2364 address ShenandoahHeap::in_cset_fast_test_addr() {
2365 ShenandoahHeap* heap = ShenandoahHeap::heap();
2366 assert(heap->collection_set() != nullptr, "Sanity");
2367 return (address) heap->collection_set()->biased_map_address();
2368 }
2369
2370 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2371 if (mode()->is_generational()) {
2372 young_generation()->reset_bytes_allocated_since_gc_start();
2373 old_generation()->reset_bytes_allocated_since_gc_start();
2374 }
2375
2376 global_generation()->reset_bytes_allocated_since_gc_start();
2377 }
2378
2379 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2380 _degenerated_gc_in_progress.set_cond(in_progress);
2381 }
2382
2383 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2384 _full_gc_in_progress.set_cond(in_progress);
2385 }
2386
2387 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2388 assert (is_full_gc_in_progress(), "should be");
2389 _full_gc_move_in_progress.set_cond(in_progress);
2390 }
2391
2392 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2393 set_gc_state(UPDATEREFS, in_progress);
2394 }
2395
2396 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2420 if (r->is_active()) {
2421 if (r->is_pinned()) {
2422 if (r->pin_count() == 0) {
2423 r->make_unpinned();
2424 }
2425 } else {
2426 if (r->pin_count() > 0) {
2427 r->make_pinned();
2428 }
2429 }
2430 }
2431 }
2432
2433 assert_pinned_region_status();
2434 }
2435
2436 #ifdef ASSERT
2437 void ShenandoahHeap::assert_pinned_region_status() {
2438 for (size_t i = 0; i < num_regions(); i++) {
2439 ShenandoahHeapRegion* r = get_region(i);
2440 if (active_generation()->contains(r)) {
2441 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2442 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2443 }
2444 }
2445 }
2446 #endif
2447
2448 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2449 return _gc_timer;
2450 }
2451
2452 void ShenandoahHeap::prepare_concurrent_roots() {
2453 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2454 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2455 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2456 set_concurrent_weak_root_in_progress(true);
2457 if (unload_classes()) {
2458 _unloader.prepare();
2459 }
2460 }
2461
2462 void ShenandoahHeap::finish_concurrent_roots() {
2463 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2477 ParallelGCThreads, nworkers);
2478 } else {
2479 // Use ConcGCThreads outside safepoints
2480 assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
2481 ConcGCThreads, nworkers);
2482 }
2483 }
2484 #endif
2485
2486 ShenandoahVerifier* ShenandoahHeap::verifier() {
2487 guarantee(ShenandoahVerify, "Should be enabled");
2488 assert (_verifier != nullptr, "sanity");
2489 return _verifier;
2490 }
2491
2492 template<bool CONCURRENT>
2493 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2494 private:
2495 ShenandoahHeap* _heap;
2496 ShenandoahRegionIterator* _regions;
2497 ShenandoahRegionChunkIterator* _work_chunks;
2498
2499 public:
2500 explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
2501 ShenandoahRegionChunkIterator* work_chunks) :
2502 WorkerTask("Shenandoah Update References"),
2503 _heap(ShenandoahHeap::heap()),
2504 _regions(regions),
2505 _work_chunks(work_chunks)
2506 {
2507 bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
2508 log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
2509 }
2510
2511 void work(uint worker_id) {
2512 if (CONCURRENT) {
2513 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2514 ShenandoahSuspendibleThreadSetJoiner stsj;
2515 do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2516 } else {
2517 ShenandoahParallelWorkerSession worker_session(worker_id);
2518 do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2519 }
2520 }
2521
2522 private:
2523 template<class T>
2524 void do_work(uint worker_id) {
2525 T cl;
2526 if (CONCURRENT && (worker_id == 0)) {
2527 // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2528 // results of evacuation. These reserves are no longer necessary because evacuation has completed.
2529 size_t cset_regions = _heap->collection_set()->count();
2530 // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
2531 // we need the reclaimed collection set regions to replenish the collector reserves
2532 _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
2533 }
2534 // If !CONCURRENT, there's no value in expanding Mutator free set
2535
2536 ShenandoahHeapRegion* r = _regions->next();
2537 // We update references for global, old, and young collections.
2538 assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
2539 ShenandoahMarkingContext* const ctx = _heap->marking_context();
2540 bool is_mixed = _heap->collection_set()->has_old_regions();
2541 while (r != nullptr) {
2542 HeapWord* update_watermark = r->get_update_watermark();
2543 assert (update_watermark >= r->bottom(), "sanity");
2544
2545 log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
2546 bool region_progress = false;
2547 if (r->is_active() && !r->is_cset()) {
2548 if (!_heap->mode()->is_generational() || r->is_young()) {
2549 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2550 region_progress = true;
2551 } else if (r->is_old()) {
2552 if (_heap->active_generation()->is_global()) {
2553 // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles. This is because
2554 // concurrent GC threads are handed entire heap regions of work at a time, and there
2555 // is no "catchup phase" consisting of remembered set scanning, during which parcels of work are smaller
2556 // and more easily distributed fairly across threads.
2557
2558 // TODO: Consider an improvement to load balance GLOBAL GC.
2559 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2560 region_progress = true;
2561 }
2562 // Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
2563 // Don't bother to report pacing progress in this case.
2564 } else {
2565 // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
2566 // to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
2567 // active status may propagate at a different speed than the changing of the region's affiliation.
2568
2569 // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
2570 // by this thread before the region's affiliation() is seen by this thread.
2571
2572 // It's ok for this race to occur because the newly transformed region does not have any references to be
2573 // updated.
2574
2575 assert(r->get_update_watermark() == r->bottom(),
2576 "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
2577 r->affiliation_name(), r->index());
2578 }
2579 }
2580 if (region_progress && ShenandoahPacing) {
2581 _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2582 }
2583 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2584 return;
2585 }
2586 r = _regions->next();
2587 }
2588
2589 if (_heap->mode()->is_generational() && !_heap->active_generation()->is_global()) {
2590 // Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
2591 // set processing when not in generational mode or when running a GLOBAL cycle.
2592
2593 // After this thread has exhausted its traditional update-refs work, it continues with updating refs within the remembered set.
2594 // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
2595 // threads during this phase, allowing all threads to work more effectively in parallel.
2596 struct ShenandoahRegionChunk assignment;
2597 RememberedScanner* scanner = _heap->card_scan();
2598
2599 while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
2600 // Keep grabbing next work chunk to process until finished, or asked to yield
2601 ShenandoahHeapRegion* r = assignment._r;
2602 if (r->is_active() && !r->is_cset() && r->is_old()) {
2603 HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
2604 HeapWord* end_of_range = r->get_update_watermark();
2605 if (end_of_range > start_of_range + assignment._chunk_size) {
2606 end_of_range = start_of_range + assignment._chunk_size;
2607 }
2608
2609 // Old region in a young cycle or mixed cycle.
2610 if (is_mixed) {
2611 // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
2612 // within old-gen HeapRegions. This remembered set can be constructed by old-gen concurrent marking
2613 // and augmented by card marking. For example, old-gen concurrent marking can remember for each old-gen
2614 // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
2615 // Update-references when _mixed_evac processes each old-gen memory range that has a traditional DIRTY
2616 // card or if the "old-gen remembered set" indicates that this card holds pointers specifically to an
2617 // old-gen region in the most recent collection set, or if this card holds pointers to other non-specific
2618 // old-gen heap regions.
2619
2620 if (r->is_humongous()) {
2621 if (start_of_range < end_of_range) {
2622 // Need to examine both dirty and clean cards during mixed evac.
2623 r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
2624 }
2625 } else {
2626 // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
2627 // and filled. Use mark bits to find objects that need to be updated.
2628 //
2629 // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
2630 // regions which are in the collection set for a particular mixed evacuation.
2631 if (start_of_range < end_of_range) {
2632 HeapWord* p = nullptr;
2633 size_t card_index = scanner->card_index_for_addr(start_of_range);
2634 // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
2635 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
2636
2637 // Any object that begins in a previous range is part of a different scanning assignment. Any object that
2638 // starts after end_of_range is also not my responsibility. (Either allocated during evacuation, so does
2639 // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
2640
2641 // Find the first object that begins in my range, if there is one.
2642 p = start_of_range;
2643 oop obj = cast_to_oop(p);
2644 HeapWord* tams = ctx->top_at_mark_start(r);
2645 if (p >= tams) {
2646 // We cannot use ctx->is_marked(obj) to test whether an object begins at this address. Instead,
2647 // we need to use the remembered set crossing map to advance p to the first object that starts
2648 // within the enclosing card.
2649
2650 while (true) {
2651 HeapWord* first_object = scanner->first_object_in_card(card_index);
2652 if (first_object != nullptr) {
2653 p = first_object;
2654 break;
2655 } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
2656 card_index++;
2657 } else {
2658 // Force the loop that follows to immediately terminate.
2659 p = end_of_range;
2660 break;
2661 }
2662 }
2663 obj = cast_to_oop(p);
2664 // Note: p may be >= end_of_range
2665 } else if (!ctx->is_marked(obj)) {
2666 p = ctx->get_next_marked_addr(p, tams);
2667 obj = cast_to_oop(p);
2668 // If there are no more marked objects before tams, this returns tams.
2669 // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
2670 }
2671 while (p < end_of_range) {
2672 // p is known to point to the beginning of marked object obj
2673 objs.do_object(obj);
2674 HeapWord* prev_p = p;
2675 p += obj->size();
2676 if (p < tams) {
2677 p = ctx->get_next_marked_addr(p, tams);
2678 // If there are no more marked objects before tams, this returns tams. Note that tams is
2679 // either >= end_of_range, or tams is the start of an object that is marked.
2680 }
2681 assert(p != prev_p, "Lack of forward progress");
2682 obj = cast_to_oop(p);
2683 }
2684 }
2685 }
2686 } else {
2687 // This is a young evacuation.
2688 if (start_of_range < end_of_range) {
2689 size_t cluster_size =
2690 CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
2691 size_t clusters = assignment._chunk_size / cluster_size;
2692 assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
2693 scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
2694 }
2695 }
2696 if (ShenandoahPacing && (start_of_range < end_of_range)) {
2697 _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
2698 }
2699 }
2700 }
2701 }
2702 }
2703 };
2704
2705 void ShenandoahHeap::update_heap_references(bool concurrent) {
2706 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2707 uint nworkers = workers()->active_workers();
2708 ShenandoahRegionChunkIterator work_list(nworkers);
2709
2710 if (concurrent) {
2711 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
2712 workers()->run_task(&task);
2713 } else {
2714 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
2715 workers()->run_task(&task);
2716 }
2717 if (ShenandoahEnableCardStats && card_scan() != nullptr) { // generational check proxy
2718 card_scan()->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
2719 }
2720 }
2721
2722 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2723 private:
2724 ShenandoahMarkingContext* _ctx;
2725 ShenandoahHeapLock* const _lock;
2726 bool _is_generational;
2727
2728 public:
2729 ShenandoahFinalUpdateRefsUpdateRegionStateClosure(ShenandoahMarkingContext* ctx) :
2730 _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
2731 _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
2732
2733 void heap_region_do(ShenandoahHeapRegion* r) override {
2734
2735 // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
2736 // regions. We consult region age during the subsequent evacuation to determine whether certain objects need to
2737 // be promoted.
2738 if (_is_generational && r->is_young() && r->is_active()) {
2739 HeapWord *tams = _ctx->top_at_mark_start(r);
2740 HeapWord *top = r->top();
2741
2742 // Allocations move the watermark when top moves. However, compacting
2743 // objects will sometimes lower top beneath the watermark, after which,
2744 // attempts to read the watermark will assert out (watermark should not be
2745 // higher than top).
2746 if (top > tams) {
2747 // There have been allocations in this region since the start of the cycle.
2748 // Any objects new to this region must not assimilate elevated age.
2749 r->reset_age();
2750 } else if (ShenandoahHeap::heap()->is_aging_cycle()) {
2751 r->increment_age();
2752 }
2753 }
2754
2755 // Drop unnecessary "pinned" state from regions that do not have CP marks
2756 // anymore, as this allows trashing them.
2757 if (r->is_active()) {
2758 if (r->is_pinned()) {
2759 if (r->pin_count() == 0) {
2760 ShenandoahHeapLocker locker(_lock);
2761 r->make_unpinned();
2762 }
2763 } else {
2764 if (r->pin_count() > 0) {
2765 ShenandoahHeapLocker locker(_lock);
2766 r->make_pinned();
2767 }
2768 }
2769 }
2770 }
2771
2772 bool is_thread_safe() override { return true; }
2773 };
2774
2775 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2776 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2777 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2778
2779 {
2780 ShenandoahGCPhase phase(concurrent ?
2781 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2782 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2783 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl (active_generation()->complete_marking_context());
2784 parallel_heap_region_iterate(&cl);
2785
2786 assert_pinned_region_status();
2787 }
2788
2789 {
2790 ShenandoahGCPhase phase(concurrent ?
2791 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2792 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2793 trash_cset_regions();
2794 }
2795 }
2796
2797 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2798 ShenandoahGCPhase phase(concurrent ?
2799 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2800 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2801 ShenandoahHeapLocker locker(lock());
2802 size_t young_cset_regions, old_cset_regions;
2803 size_t first_old_region, last_old_region, old_region_count;
2804 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2805 // If there are no old regions, first_old_region will be greater than last_old_region
2806 assert((first_old_region > last_old_region) ||
2807 ((last_old_region + 1 - first_old_region >= old_region_count) &&
2808 get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2809 "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2810 old_region_count, first_old_region, last_old_region);
2811
2812 if (mode()->is_generational()) {
2813 assert(verify_generation_usage(true, old_generation()->used_regions(),
2814 old_generation()->used(), old_generation()->get_humongous_waste(),
2815 true, young_generation()->used_regions(),
2816 young_generation()->used(), young_generation()->get_humongous_waste()),
2817 "Generation accounts are inaccurate");
2818
2819 // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so we consider all of this
2820 // available for transfer to old. Note that transfer of humongous regions does not impact available.
2821 size_t allocation_runway = young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2822 ShenandoahGenerationalHeap::heap()->compute_old_generation_balance(allocation_runway, old_cset_regions);
2823
2824 // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
2825 // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
2826 // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
2827 // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2828 //
2829 // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2830 // within partially consumed regions of memory.
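// Worked example (illustrative numbers only): if old-gen currently spans 128 regions, the 12.5% criterion
// corresponds to 16 regions' worth of free memory residing within partially consumed old regions; more free
// memory than that inside such regions is treated as excessive fragmentation.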
2831 }
2832 // Rebuild free set based on adjusted generation sizes.
2833 _free_set->rebuild(young_cset_regions, old_cset_regions);
2834
2835 if (mode()->is_generational() && (ShenandoahGenerationalHumongousReserve > 0)) {
2836 old_generation()->maybe_trigger_collection(first_old_region, last_old_region, old_region_count);
2837 }
2838 }
2839
2840 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2841 print_on(st);
2842 st->cr();
2843 print_heap_regions_on(st);
2844 }
2845
2846 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2847 size_t slice = r->index() / _bitmap_regions_per_slice;
2848
2849 size_t regions_from = _bitmap_regions_per_slice * slice;
2850 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2851 for (size_t g = regions_from; g < regions_to; g++) {
2852 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2853 if (skip_self && g == r->index()) continue;
2854 if (get_region(g)->is_committed()) {
2855 return true;
2856 }
2939 void ShenandoahHeap::initialize_serviceability() {
2940 _memory_pool = new ShenandoahMemoryPool(this);
2941 _cycle_memory_manager.add_pool(_memory_pool);
2942 _stw_memory_manager.add_pool(_memory_pool);
2943 }
2944
2945 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2946 GrowableArray<GCMemoryManager*> memory_managers(2);
2947 memory_managers.append(&_cycle_memory_manager);
2948 memory_managers.append(&_stw_memory_manager);
2949 return memory_managers;
2950 }
2951
2952 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2953 GrowableArray<MemoryPool*> memory_pools(1);
2954 memory_pools.append(_memory_pool);
2955 return memory_pools;
2956 }
2957
2958 MemoryUsage ShenandoahHeap::memory_usage() {
2959 return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2960 }
2961
2962 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2963 _heap(ShenandoahHeap::heap()),
2964 _index(0) {}
2965
2966 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2967 _heap(heap),
2968 _index(0) {}
2969
2970 void ShenandoahRegionIterator::reset() {
2971 _index = 0;
2972 }
2973
2974 bool ShenandoahRegionIterator::has_next() const {
2975 return _index < _heap->num_regions();
2976 }
2977
2978 char ShenandoahHeap::gc_state() const {
2979 return _gc_state.raw_value();
2980 }
2981
2982 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2983 #ifdef ASSERT
2984 assert(_liveness_cache != nullptr, "sanity");
2985 assert(worker_id < _max_workers, "sanity");
2986 for (uint i = 0; i < num_regions(); i++) {
2987 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2988 }
2989 #endif
2990 return _liveness_cache[worker_id];
2991 }
2992
2993 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2994 assert(worker_id < _max_workers, "sanity");
2995 assert(_liveness_cache != nullptr, "sanity");
2996 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2997
2998 for (uint i = 0; i < num_regions(); i++) {
2999 ShenandoahLiveData live = ld[i];
3000 if (live > 0) {
3001 ShenandoahHeapRegion* r = get_region(i);
3002 r->increase_live_data_gc_words(live);
3003 ld[i] = 0;
3004 }
3005 }
3006 }
3007
3008 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
3009 if (is_idle()) return false;
3010
3011 // Objects allocated after marking start are implicitly alive, don't need any barriers during
3012 // marking phase.
3013 if (is_concurrent_mark_in_progress() &&
3014 !marking_context()->allocated_after_mark_start(obj)) {
3015 return true;
3016 }
3017
3018 // Can not guarantee obj is deeply good.
3019 if (has_forwarded_objects()) {
3020 return true;
3021 }
3022
3023 return false;
3024 }
3025
3026 void ShenandoahHeap::transfer_old_pointers_from_satb() {
3027 _old_generation->transfer_pointers_from_satb();
3028 }
3029
3030 bool ShenandoahHeap::verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
3031 bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste) {
3032 size_t tally_old_regions = 0;
3033 size_t tally_old_bytes = 0;
3034 size_t tally_old_waste = 0;
3035 size_t tally_young_regions = 0;
3036 size_t tally_young_bytes = 0;
3037 size_t tally_young_waste = 0;
3038
3039 shenandoah_assert_heaplocked_or_safepoint();
3040 for (size_t i = 0; i < num_regions(); i++) {
3041 ShenandoahHeapRegion* r = get_region(i);
3042 if (r->is_old()) {
3043 tally_old_regions++;
3044 tally_old_bytes += r->used();
3045 if (r->is_humongous()) {
3046 ShenandoahHeapRegion* start = r->humongous_start_region();
3047 HeapWord* obj_addr = start->bottom();
3048 oop obj = cast_to_oop(obj_addr);
3049 size_t word_size = obj->size();
3050 HeapWord* end_addr = obj_addr + word_size;
3051 if (end_addr <= r->end()) {
3052 tally_old_waste += (r->end() - end_addr) * HeapWordSize;
3053 }
3054 }
3055 } else if (r->is_young()) {
3056 tally_young_regions++;
3057 tally_young_bytes += r->used();
3058 if (r->is_humongous()) {
3059 ShenandoahHeapRegion* start = r->humongous_start_region();
3060 HeapWord* obj_addr = start->bottom();
3061 oop obj = cast_to_oop(obj_addr);
3062 size_t word_size = obj->size();
3063 HeapWord* end_addr = obj_addr + word_size;
3064 if (end_addr <= r->end()) {
3065 tally_young_waste += (r->end() - end_addr) * HeapWordSize;
3066 }
3067 }
3068 }
3069 }
3070 if (verify_young &&
3071 ((young_regions != tally_young_regions) || (young_bytes != tally_young_bytes) || (young_waste != tally_young_waste))) {
3072 return false;
3073 } else if (verify_old &&
3074 ((old_regions != tally_old_regions) || (old_bytes != tally_old_bytes) || (old_waste != tally_old_waste))) {
3075 return false;
3076 } else {
3077 return true;
3078 }
3079 }
3080
3081 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
3082 if (!mode()->is_generational()) {
3083 return global_generation();
3084 } else if (affiliation == YOUNG_GENERATION) {
3085 return young_generation();
3086 } else if (affiliation == OLD_GENERATION) {
3087 return old_generation();
3088 }
3089
3090 ShouldNotReachHere();
3091 return nullptr;
3092 }
3093
3094 void ShenandoahHeap::log_heap_status(const char* msg) const {
3095 if (mode()->is_generational()) {
3096 young_generation()->log_status(msg);
3097 old_generation()->log_status(msg);
3098 } else {
3099 global_generation()->log_status(msg);
3100 }
3101 }
3102
3103 void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
3104 if (mode()->is_generational()) {
3105 _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
3106 }
3107 }
3108
3109 void ShenandoahHeap::mark_card_as_dirty(void* location) {
3110 if (mode()->is_generational()) {
3111 _card_scan->mark_card_as_dirty((HeapWord*)location);
3112 }
3113 }