/*
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "cds/archiveHeapWriter.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermarkSet.hpp"

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
                                          heap_rs.size(), heap_rs.page_size());

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }
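
  // Worked example of the slicing above (illustrative numbers; actual values are
  // platform-dependent): with a 4 KB bitmap page and 1 KB of bitmap per region,
  // one page covers 4 regions, so _bitmap_regions_per_slice = 4 and
  // _bitmap_bytes_per_slice = 4 KB. Bitmap commits and uncommits then happen in
  // page-granular slices that cover whole groups of regions.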

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Mark Bitmap",
                                          bitmap_size_orig, bitmap_page_size,
                                          bitmap.base(),
                                          bitmap.size(), bitmap.page_size());
  MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    os::trace_page_sizes_for_requested_size("Verify Bitmap",
                                            bitmap_size_orig, bitmap_page_size,
                                            verify_bitmap.base(),
                                            verify_bitmap.size(), verify_bitmap.page_size());
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  size_t aux_bitmap_page_size = bitmap_page_size;

      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
    os::trace_page_sizes_for_requested_size("Collection Set",
                                            cset_size, cset_page_size,
                                            cset_rs.base(),
                                            cset_rs.size(), cset_rs.page_size());
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching continuous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }
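
  // A reading of the two knobs above (interpretation, not authoritative): the first
  // is how many completed SATB buffers may accumulate before processing kicks in;
  // the second means a filled buffer is only enqueued if it remains at least that
  // percent full after filtering, and is otherwise reused in place.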

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}
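
// Example usage (illustrative command lines, assuming a JDK built with Shenandoah):
//
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb ...
//   java -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -XX:ShenandoahGCMode=passive ...
//
// Any other value fails VM initialization, and diagnostic/experimental modes must
// be unlocked first, per the checks above.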

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}
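
// Similarly for heuristics: the concrete set of names comes from the selected
// mode's initialize_heuristics(); as a sketch (names assumed from the stock
// Shenandoah heuristics, e.g. adaptive, static, compact, aggressive):
//
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact ...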

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _gc_no_progress_count(0),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
     !is_concurrent_weak_root_in_progress())   st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));
  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);
  // GCLABs cannot be initialized early during VM startup, because they cannot
  // determine their max_size yet. Instead, let WorkerThreads initialize the
  // GCLAB whenever a new worker is created.
637 _workers->set_initialize_gclab();
638 if (_safepoint_workers != nullptr) {
639 _safepoint_workers->threads_do(&init_gclabs);
640 _safepoint_workers->set_initialize_gclab();
641 }
642
643 _heuristics->initialize();
644
645 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
646 }
647
648 size_t ShenandoahHeap::used() const {
649 return Atomic::load(&_used);
650 }
651
652 size_t ShenandoahHeap::committed() const {
653 return Atomic::load(&_committed);
654 }
655
656 size_t ShenandoahHeap::available() const {
657 return free_set()->available();
658 }
659
660 void ShenandoahHeap::increase_committed(size_t bytes) {
661 shenandoah_assert_heaplocked_or_safepoint();
662 _committed += bytes;
663 }
664
665 void ShenandoahHeap::decrease_committed(size_t bytes) {
666 shenandoah_assert_heaplocked_or_safepoint();
667 _committed -= bytes;
668 }
669
670 void ShenandoahHeap::increase_used(size_t bytes) {
671 Atomic::add(&_used, bytes, memory_order_relaxed);
672 }
673
674 void ShenandoahHeap::set_used(size_t bytes) {
675 Atomic::store(&_used, bytes);
676 }
677
678 void ShenandoahHeap::decrease_used(size_t bytes) {
679 assert(used() >= bytes, "never decrease heap size by more than we've left");
680 Atomic::sub(&_used, bytes, memory_order_relaxed);
681 }
682
683 void ShenandoahHeap::increase_allocated(size_t bytes) {
684 Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
685 }
686
687 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
688 size_t bytes = words * HeapWordSize;
689 if (!waste) {
690 increase_used(bytes);
691 }
692 increase_allocated(bytes);
693 if (ShenandoahPacing) {
694 control_thread()->pacing_notify_alloc(words);
695 if (waste) {
696 pacer()->claim_for_alloc<true>(words);
697 }
698 }
699 }
700
701 size_t ShenandoahHeap::capacity() const {
702 return committed();
703 }
704
705 size_t ShenandoahHeap::max_capacity() const {
706 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
707 }
708
709 size_t ShenandoahHeap::soft_max_capacity() const {
710 size_t v = Atomic::load(&_soft_max_size);
711 assert(min_capacity() <= v && v <= max_capacity(),
712 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
713 min_capacity(), v, max_capacity());
714 return v;
715 }
716
806 size_t old_soft_max = soft_max_capacity();
807 if (new_soft_max != old_soft_max) {
808 new_soft_max = MAX2(min_capacity(), new_soft_max);
809 new_soft_max = MIN2(max_capacity(), new_soft_max);
810 if (new_soft_max != old_soft_max) {
811 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
812 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
813 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
814 );
815 set_soft_max_capacity(new_soft_max);
816 return true;
817 }
818 }
819 return false;
820 }
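
// Usage sketch for the soft-max clamping above (the jcmd route is an assumption
// about tooling, not shown in this file): SoftMaxHeapSize is a manageable flag,
// so it can be adjusted on a running VM, e.g.
//
//   jcmd <pid> VM.set_flag SoftMaxHeapSize 2g
//
// The new value is clamped to [min_capacity(), max_capacity()] before it is applied.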

void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  monitoring_support()->notify_heap_changed();

  // This is called from allocation path, and thus should be fast.
  _heap_changed.try_set();
}

void ShenandoahHeap::set_forced_counters_update(bool value) {
  monitoring_support()->set_forced_counters_update(value);
}

void ShenandoahHeap::handle_force_counters_update() {
  monitoring_support()->handle_force_counters_update();
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut below. This captures
  // the case when moderately-sized objects always take the shortcut; at some point,
  // the heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert(size <= actual_size, "allocation should fit");

  // ...and clear or zap just allocated TLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}
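
// Worked example of the sizing policy above (numbers illustrative, assuming they
// fall within PLAB::min_size()/max_size()): a thread with a 64K-word GCLAB doubles
// to 128K words. The new size is recorded before the fit check, so even when this
// request falls back to a shared allocation (new_size < size), the next refill
// already uses the expanded size.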

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Check that GC overhead is not exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-TLAB) allocations. This check
    // tests whether the GC overhead limit has been exceeded: it notifies
    // the collector to start a cycle, and raises an OOME to the mutator
    // if the recent Full GCs have made no progress.
933 if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
934 control_thread()->handle_alloc_failure(req, false);
935 return nullptr;
936 }
937
938 if (result == nullptr) {
939 // Block until control thread reacted, then retry allocation.
940 //
941 // It might happen that one of the threads requesting allocation would unblock
942 // way later after GC happened, only to fail the second allocation, because
943 // other threads have already depleted the free storage. In this case, a better
944 // strategy is to try again, until at least one full GC has completed.
945 //
      // Stop retrying and return nullptr, causing an OutOfMemoryError, if the allocation still fails after:
      // a) We experienced a GC that had good progress, or
      // b) We experienced at least one Full GC (whether or not it had good progress)
      //
      // TODO: Consider GLOBAL GC rather than Full GC to remediate OOM condition: https://bugs.openjdk.org/browse/JDK-8335910

      size_t original_count = shenandoah_policy()->full_gc_count();
      while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
        control_thread()->handle_alloc_failure(req, true);
        result = allocate_memory_under_lock(req, in_new_region);
      }
      if (result != nullptr) {
        // If our allocation request has been satisfied after it initially failed, we count this as good GC progress
        notify_gc_progress();
      }
      if (log_is_enabled(Debug, gc, alloc)) {
        ResourceMark rm;
        log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
                             ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
                             Thread::current()->name(), p2i(result), req.type_string(), req.size(),
                             original_count, get_gc_no_progress_count());
      }
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert(req.is_lab_alloc() || (requested == actual),
           "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
           ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with mutator allocation, then we may need to block for safepoint.
  // We cannot block for safepoint for GC allocations, because there is a high chance
  // we are already running at safepoint or from stack watermark machinery, and we cannot
  // block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = nullptr;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    ContinuationGCSupport::relativize_stack_chunk(copy_val);
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}
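
// A note on the failure path above (a restatement of the protocol, not new
// behavior): several threads may race to evacuate the same object, each making a
// private copy; try_update_forwardee() installs exactly one winner via CAS, and
// every loser rolls its copy back (GCLAB) or overwrites it with a filler object
// (shared allocation) so the heap stays parsable.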

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}
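
// Worked example (region size is configuration-dependent; 2 MB assumed for
// illustration): a 5 MB humongous object yields required_regions = 3. With
// start->index() == 10, the loop trashes regions 12, 11, 10, tail-first, keeping
// the trace-log invariant described in the loop comment.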

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads(const char* name) {
  ShenandoahRendezvousClosure cl(name);
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happened since now.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set, make sure it knows about
      // current pinning status. Also, this allows trashing regions whose
      // pinning status has since been dropped.
1800 if (r->is_pinned()) {
1801 if (r->pin_count() == 0) {
1802 ShenandoahHeapLocker locker(_lock);
1803 r->make_unpinned();
1804 }
1805 } else {
1806 if (r->pin_count() > 0) {
1807 ShenandoahHeapLocker locker(_lock);
1808 r->make_pinned();
1809 }
1810 }
1811
      // Remember the limit for updating refs. It is guaranteed that no
      // from-space references are written from here on.
1814 r->set_update_watermark_at_safepoint(r->top());
1815 } else {
1816 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1817 assert(_ctx->top_at_mark_start(r) == r->top(),
1818 "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1819 }
1820 }
1821
1822 bool is_thread_safe() { return true; }
1823 };
1824
1825 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1826 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1827 {
1828 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1829 ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1830 ShenandoahFinalMarkUpdateRegionStateClosure cl;
1831 parallel_heap_region_iterate(&cl);
1832
1833 assert_pinned_region_status();
1834 }
1835
1836 {
1837 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1838 ShenandoahPhaseTimings::degen_gc_choose_cset);
1839 ShenandoahHeapLocker locker(lock());
1840 _collection_set->clear();
1841 heuristics()->choose_collection_set(_collection_set);
1842 }
1843
1844 {
1845 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1846 ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1847 ShenandoahHeapLocker locker(lock());
1848 _free_set->rebuild();
1849 }
1850 }
1851
1852 void ShenandoahHeap::do_class_unloading() {
1853 _unloader.unload();
1854 }
1855
1856 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1857 // Weak refs processing
1858 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1859 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1860 ShenandoahTimingsTracker t(phase);
1861 ShenandoahGCWorkerPhase worker_phase(phase);
1862 ref_processor()->process_references(phase, workers(), false /* concurrent */);
1863 }
1864
1865 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1866 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1867
1868 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1869 // make them parsable for update code to work correctly. Plus, we can compute new sizes
1870 // for future GCLABs here.
1871 if (UseTLAB) {
1872 ShenandoahGCPhase phase(concurrent ?
1873 ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1874 ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1875 gclabs_retire(ResizeTLAB);
1876 }
1877
1878 _update_refs_iterator.reset();
1879 }
1880
1881 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1882 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1883 if (_gc_state_changed) {
1884 _gc_state_changed = false;
1885 char state = gc_state();
1886 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1887 ShenandoahThreadLocalData::set_gc_state(t, state);
1888 }
1889 }
1890 }
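
// For illustration, a sketch of how the propagated byte is consumed on the barrier
// side (assumed shape; the authoritative fast path lives in the barrier set code):
//
//   char st = ShenandoahThreadLocalData::gc_state(thread);
//   if (st & ShenandoahHeap::HAS_FORWARDED) {
//     // take the load-reference-barrier slow path
//   }
//
// The per-thread copy lets barriers poll GC state without touching shared memory.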

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert(is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
           "Region " SIZE_FORMAT " pinning status is inconsistent", i);
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  } else {
    // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
           ConcGCThreads, nworkers);
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert(_verifier != nullptr, "sanity");
  return _verifier;
}

template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;
    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();
      // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled because
      // we need the reclaimed collection set regions to replenish the collector reserves
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set

    ShenandoahHeapRegion* r = _regions->next();
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
      }
      if (ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}

class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that no longer have
    // CP marks, as this allows trashing them.

    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::update_heap_region_states(bool concurrent) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
    ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
    trash_cset_regions();
  }
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  st->cr();
  print_heap_regions_on(st);
}

bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->index() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert(g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->index()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }

void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return _memory_pool->get_memory_usage();
}
2432
2433 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2434 _heap(ShenandoahHeap::heap()),
2435 _index(0) {}
2436
2437 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2438 _heap(heap),
2439 _index(0) {}
2440
2441 void ShenandoahRegionIterator::reset() {
2442 _index = 0;
2443 }
2444
2445 bool ShenandoahRegionIterator::has_next() const {
2446 return _index < _heap->num_regions();
2447 }
2448
2449 char ShenandoahHeap::gc_state() const {
2450 return _gc_state.raw_value();
2552 }
2553
2554 // No unclaimed tail at the end of archive space.
2555 assert(cur == end,
2556 "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2557 p2i(cur), p2i(end));
2558
2559 // Region bounds are good.
2560 ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2561 ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2562 assert(begin_reg->is_regular(), "Must be");
2563 assert(end_reg->is_regular(), "Must be");
2564 assert(begin_reg->bottom() == start,
2565 "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2566 p2i(start), p2i(begin_reg->bottom()));
2567 assert(end_reg->top() == end,
2568 "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2569 p2i(end), p2i(end_reg->top()));
2570 #endif
2571 }
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "memory/allocation.hpp"
29 #include "memory/universe.hpp"
30
31 #include "gc/shared/classUnloadingContext.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/locationPrinter.inline.hpp"
36 #include "gc/shared/memAllocator.hpp"
37 #include "gc/shared/plab.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39
40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
49 #include "gc/shenandoah/shenandoahControlThread.hpp"
50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
56 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
57 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
58 #include "gc/shenandoah/shenandoahInitLogger.hpp"
59 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
60 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
61 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
62 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
63 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
65 #include "gc/shenandoah/shenandoahPadding.hpp"
66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
67 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
68 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
69 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
70 #include "gc/shenandoah/shenandoahSTWMark.hpp"
71 #include "gc/shenandoah/shenandoahUtils.hpp"
72 #include "gc/shenandoah/shenandoahVerifier.hpp"
73 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
74 #include "gc/shenandoah/shenandoahVMOperations.hpp"
75 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
76 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
77 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
78 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
79 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
80 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
81 #include "utilities/globalDefinitions.hpp"
82
83 #if INCLUDE_JFR
84 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
85 #endif
86
87 #include "cds/archiveHeapWriter.hpp"
88 #include "classfile/systemDictionary.hpp"
89 #include "code/codeCache.hpp"
90 #include "memory/classLoaderMetaspace.hpp"
91 #include "memory/metaspaceUtils.hpp"
92 #include "nmt/mallocTracker.hpp"
93 #include "nmt/memTracker.hpp"
94 #include "oops/compressedOops.inline.hpp"
95 #include "prims/jvmtiTagMap.hpp"
96 #include "runtime/atomic.hpp"
97 #include "runtime/globals.hpp"
98 #include "runtime/interfaceSupport.inline.hpp"
99 #include "runtime/java.hpp"
100 #include "runtime/orderAccess.hpp"
101 #include "runtime/safepointMechanism.hpp"
102 #include "runtime/stackWatermarkSet.hpp"
156 jint ShenandoahHeap::initialize() {
157 //
158 // Figure out heap sizing
159 //
160
161 size_t init_byte_size = InitialHeapSize;
162 size_t min_byte_size = MinHeapSize;
163 size_t max_byte_size = MaxHeapSize;
164 size_t heap_alignment = HeapAlignment;
165
166 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
167
168 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
169 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
170
171 _num_regions = ShenandoahHeapRegion::region_count();
172 assert(_num_regions == (max_byte_size / reg_size_bytes),
173 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
174 _num_regions, max_byte_size, reg_size_bytes);
175
176 size_t num_committed_regions = init_byte_size / reg_size_bytes;
177 num_committed_regions = MIN2(num_committed_regions, _num_regions);
178 assert(num_committed_regions <= _num_regions, "sanity");
179 _initial_size = num_committed_regions * reg_size_bytes;
180
181 size_t num_min_regions = min_byte_size / reg_size_bytes;
182 num_min_regions = MIN2(num_min_regions, _num_regions);
183 assert(num_min_regions <= _num_regions, "sanity");
184 _minimum_size = num_min_regions * reg_size_bytes;
185
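// For illustration only (values assumed, not from this file): with a 2m region
// size, MaxHeapSize=4g, InitialHeapSize=1g and MinHeapSize=256m, the sizing above
// yields _num_regions = 4g/2m = 2048, num_committed_regions = 1g/2m = 512
// (so _initial_size = 1g), and num_min_regions = 256m/2m = 128 (so _minimum_size = 256m).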
186 // Default to max heap size.
187 _soft_max_size = _num_regions * reg_size_bytes;
188
189 _committed = _initial_size;
190
191 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
192 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
193 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
194
195 //
209 heap_rs.size(), heap_rs.page_size());
210
211 #if SHENANDOAH_OPTIMIZED_MARKTASK
212 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
213 // Fail if we ever attempt to address more than we can.
214 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
215 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
216 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
217 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
218 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
219 vm_exit_during_initialization("Fatal Error", buf);
220 }
221 #endif
222
223 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
224 if (!_heap_region_special) {
225 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
226 "Cannot commit heap memory");
227 }
228
229 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
230
231 // Now that we know the number of regions and the heap sizes, initialize the heuristics.
232 initialize_heuristics();
233
234 assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
235
236 //
237 // Worker threads must be initialized after the barrier is configured
238 //
239 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
240 if (_workers == nullptr) {
241 vm_exit_during_initialization("Failed necessary allocation.");
242 } else {
243 _workers->initialize_workers();
244 }
245
246 if (ParallelGCThreads > 1) {
247 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
248 _safepoint_workers->initialize_workers();
249 }
250
251 //
252 // Reserve and commit memory for bitmap(s)
253 //
254
255 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
256 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
257
258 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
259
260 guarantee(bitmap_bytes_per_region != 0,
261 "Bitmap bytes per region should not be zero");
262 guarantee(is_power_of_2(bitmap_bytes_per_region),
263 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
264
265 if (bitmap_page_size > bitmap_bytes_per_region) {
266 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
267 _bitmap_bytes_per_slice = bitmap_page_size;
268 } else {
269 _bitmap_regions_per_slice = 1;
270 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
271 }
272
273 guarantee(_bitmap_regions_per_slice >= 1,
274 "Should have at least one region per slice: " SIZE_FORMAT,
275 _bitmap_regions_per_slice);
276
277 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
278 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
279 _bitmap_bytes_per_slice, bitmap_page_size);
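// A sketch of the slicing math above, under assumed values: if large pages make
// bitmap_page_size = 2m and bitmap_bytes_per_region = 32k, then one bitmap page
// covers 2m/32k = 64 regions, so _bitmap_regions_per_slice = 64 and
// _bitmap_bytes_per_slice = 2m. With 4k base pages the else-branch applies instead:
// one region per slice and 32k per slice, still page-granular as the guarantee checks.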
280
281 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
282 os::trace_page_sizes_for_requested_size("Mark Bitmap",
283 bitmap_size_orig, bitmap_page_size,
284 bitmap.base(),
285 bitmap.size(), bitmap.page_size());
286 MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
287 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
288 _bitmap_region_special = bitmap.special();
289
290 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
291 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
292 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
293 if (!_bitmap_region_special) {
294 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
295 "Cannot commit bitmap memory");
296 }
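// Continuing the assumed example: committing bitmap space for 512 regions with
// _bitmap_regions_per_slice = 64 rounds up to align_up(512, 64)/64 = 8 slices,
// i.e. bitmap_init_commit = 8 * 2m = 16m, capped at _bitmap_size.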
297
298 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
299
300 if (ShenandoahVerify) {
301 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
302 os::trace_page_sizes_for_requested_size("Verify Bitmap",
303 bitmap_size_orig, bitmap_page_size,
304 verify_bitmap.base(),
305 verify_bitmap.size(), verify_bitmap.page_size());
306 if (!verify_bitmap.special()) {
307 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
308 "Cannot commit verification bitmap memory");
309 }
310 MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
311 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
312 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
313 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
314 }
315
316 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
317 size_t aux_bitmap_page_size = bitmap_page_size;
318
362 assert(is_aligned(req_addr, cset_align), "Should be aligned");
363 cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
364 if (cset_rs.is_reserved()) {
365 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
366 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
367 break;
368 }
369 }
370
371 if (_collection_set == nullptr) {
372 cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
373 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
374 }
375 os::trace_page_sizes_for_requested_size("Collection Set",
376 cset_size, cset_page_size,
377 cset_rs.base(),
378 cset_rs.size(), cset_rs.page_size());
379 }
380
381 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
382 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
383 _free_set = new ShenandoahFreeSet(this, _num_regions);
384
385 {
386 ShenandoahHeapLocker locker(lock());
387
388 for (size_t i = 0; i < _num_regions; i++) {
389 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
390 bool is_committed = i < num_committed_regions;
391 void* loc = region_storage.base() + i * region_align;
392
393 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
394 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
395
396 _marking_context->initialize_top_at_mark_start(r);
397 _regions[i] = r;
398 assert(!collection_set()->is_in(i), "New region should not be in collection set");
399
400 _affiliations[i] = ShenandoahAffiliation::FREE;
401 }
402
403 // Initialize to complete
404 _marking_context->mark_complete();
405 size_t young_cset_regions, old_cset_regions;
406
407 // We are initializing the free set; cset region tallies are ignored here.
408 size_t first_old, last_old, num_old;
409 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
410 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
411 }
412
413 if (AlwaysPreTouch) {
414 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
415 // before initialize() below zeroes it with the initializing thread. For any given region,
416 // we touch the region and the corresponding bitmaps from the same thread.
417 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
418
419 _pretouch_heap_page_size = heap_page_size;
420 _pretouch_bitmap_page_size = bitmap_page_size;
421
422 // OS memory managers may want to coalesce back-to-back pages. Make their jobs
423 // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
424
425 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
426 _workers->run_task(&bcl);
427
428 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
429 _workers->run_task(&hcl);
430 }
439 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
440 }
441
442 // There should probably be Shenandoah-specific options for these,
443 // just as there are G1-specific options.
444 {
445 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
446 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
447 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
448 }
449
450 _monitoring_support = new ShenandoahMonitoringSupport(this);
451 _phase_timings = new ShenandoahPhaseTimings(max_workers());
452 ShenandoahCodeRoots::initialize();
453
454 if (ShenandoahPacing) {
455 _pacer = new ShenandoahPacer(this);
456 _pacer->setup_for_idle();
457 }
458
459 initialize_controller();
460
461 print_init_logger();
462
463 return JNI_OK;
464 }
465
466 void ShenandoahHeap::initialize_controller() {
467 _control_thread = new ShenandoahControlThread();
468 }
469
470 void ShenandoahHeap::print_init_logger() const {
471 ShenandoahInitLogger::print();
472 }
473
474 void ShenandoahHeap::initialize_mode() {
475 if (ShenandoahGCMode != nullptr) {
476 if (strcmp(ShenandoahGCMode, "satb") == 0) {
477 _gc_mode = new ShenandoahSATBMode();
478 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
479 _gc_mode = new ShenandoahPassiveMode();
480 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
481 _gc_mode = new ShenandoahGenerationalMode();
482 } else {
483 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
484 }
485 } else {
486 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
487 }
488 _gc_mode->initialize_flags();
489 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
490 vm_exit_during_initialization(
491 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
492 _gc_mode->name()));
493 }
494 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
495 vm_exit_during_initialization(
496 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
497 _gc_mode->name()));
498 }
499 }
500
501 void ShenandoahHeap::initialize_heuristics() {
502 _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
503 _global_generation->initialize_heuristics(mode());
504 }
505
506 #ifdef _MSC_VER
507 #pragma warning( push )
508 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
509 #endif
510
511 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
512 CollectedHeap(),
513 _gc_generation(nullptr),
514 _active_generation(nullptr),
515 _initial_size(0),
516 _committed(0),
517 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
518 _workers(nullptr),
519 _safepoint_workers(nullptr),
520 _heap_region_special(false),
521 _num_regions(0),
522 _regions(nullptr),
523 _affiliations(nullptr),
524 _gc_state_changed(false),
525 _gc_no_progress_count(0),
526 _cancel_requested_time(0),
527 _update_refs_iterator(this),
528 _global_generation(nullptr),
529 _control_thread(nullptr),
530 _young_generation(nullptr),
531 _old_generation(nullptr),
532 _shenandoah_policy(policy),
533 _gc_mode(nullptr),
534 _free_set(nullptr),
535 _pacer(nullptr),
536 _verifier(nullptr),
537 _phase_timings(nullptr),
538 _mmu_tracker(),
539 _monitoring_support(nullptr),
540 _memory_pool(nullptr),
541 _stw_memory_manager("Shenandoah Pauses"),
542 _cycle_memory_manager("Shenandoah Cycles"),
543 _gc_timer(new ConcurrentGCTimer()),
544 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
545 _marking_context(nullptr),
546 _bitmap_size(0),
547 _bitmap_regions_per_slice(0),
548 _bitmap_bytes_per_slice(0),
549 _bitmap_region_special(false),
550 _aux_bitmap_region_special(false),
551 _liveness_cache(nullptr),
552 _collection_set(nullptr)
553 {
554 // Initialize the GC mode early; many subsequent initialization procedures depend on it.
555 initialize_mode();
556 }
557
558 #ifdef _MSC_VER
559 #pragma warning( pop )
560 #endif
561
562 void ShenandoahHeap::print_on(outputStream* st) const {
563 st->print_cr("Shenandoah Heap");
564 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
565 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
566 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
567 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
568 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
569 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
570 num_regions(),
571 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
572 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
573
574 st->print("Status: ");
575 if (has_forwarded_objects()) st->print("has forwarded objects, ");
576 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
577 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
578 if (is_evacuation_in_progress()) st->print("evacuating, ");
579 if (is_update_refs_in_progress()) st->print("updating refs, ");
580 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
581 if (is_full_gc_in_progress()) st->print("full gc, ");
582 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
583 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
584 if (is_concurrent_strong_root_in_progress() &&
585 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
586
587 if (cancelled_gc()) {
588 st->print("cancelled");
589 } else {
590 st->print("not cancelled");
591 }
592 st->cr();
593
594 st->print_cr("Reserved region:");
595 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
596 p2i(reserved_region().start()),
597 p2i(reserved_region().end()));
608 st->cr();
609 MetaspaceUtils::print_on(st);
610
611 if (Verbose) {
612 st->cr();
613 print_heap_regions_on(st);
614 }
615 }
616
617 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
618 public:
619 void do_thread(Thread* thread) {
620 assert(thread != nullptr, "Sanity");
621 assert(thread->is_Worker_thread(), "Only worker thread expected");
622 ShenandoahThreadLocalData::initialize_gclab(thread);
623 }
624 };
625
626 void ShenandoahHeap::post_initialize() {
627 CollectedHeap::post_initialize();
628 _mmu_tracker.initialize();
629
630 MutexLocker ml(Threads_lock);
631
632 ShenandoahInitWorkerGCLABClosure init_gclabs;
633 _workers->threads_do(&init_gclabs);
634
635 // The GCLAB cannot be initialized early during VM startup, as it cannot determine its max_size.
636 // Instead, let the WorkerThreads initialize the GCLAB when a new worker is created.
637 _workers->set_initialize_gclab();
638 if (_safepoint_workers != nullptr) {
639 _safepoint_workers->threads_do(&init_gclabs);
640 _safepoint_workers->set_initialize_gclab();
641 }
642
643 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
644 }
645
646 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
647 return _global_generation->heuristics();
648 }
649
650 size_t ShenandoahHeap::used() const {
651 return global_generation()->used();
652 }
653
654 size_t ShenandoahHeap::committed() const {
655 return Atomic::load(&_committed);
656 }
657
658 void ShenandoahHeap::increase_committed(size_t bytes) {
659 shenandoah_assert_heaplocked_or_safepoint();
660 _committed += bytes;
661 }
662
663 void ShenandoahHeap::decrease_committed(size_t bytes) {
664 shenandoah_assert_heaplocked_or_safepoint();
665 _committed -= bytes;
666 }
667
668 // For tracking usage based on allocations, it should be the case that:
669 // * The sum of regions::used == heap::used
670 // * The sum of a generation's regions::used == generation::used
671 // * The sum of a generation's humongous regions::free == generation::humongous_waste
672 // These invariants are checked by the verifier on GC safepoints.
673 //
674 // Additional notes:
675 // * When a mutator's allocation request causes a region to be retired, the
676 // free memory left in that region is considered waste. It does not contribute
677 // to the usage, but it _does_ contribute to allocation rate.
678 // * The bottom of a PLAB must be aligned on card size. In some cases this will
679 // require padding in front of the PLAB (a filler object). Because this padding
680 // is included in the region's used memory, we also include it in the usage
681 // accounting as waste.
682 // * Mutator allocations are used to compute an allocation rate, and are also
683 // reported to the Pacer for that purpose.
684 // * There are three sources of waste:
685 // 1. The padding used to align a PLAB on card size
686 // 2. Region's free is less than minimum TLAB size and is retired
687 // 3. The unused portion of memory in the last region of a humongous object
688 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
689 size_t actual_bytes = req.actual_size() * HeapWordSize;
690 size_t wasted_bytes = req.waste() * HeapWordSize;
691 ShenandoahGeneration* generation = generation_for(req.affiliation());
692
693 if (req.is_gc_alloc()) {
694 assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
695 increase_used(generation, actual_bytes + wasted_bytes);
696 } else {
697 assert(req.is_mutator_alloc(), "Expected mutator alloc here");
698 // padding and actual size both count towards allocation counter
699 generation->increase_allocated(actual_bytes + wasted_bytes);
700
701 // only actual size counts toward usage for mutator allocations
702 increase_used(generation, actual_bytes);
703
704 // notify pacer of both actual size and waste
705 notify_mutator_alloc_words(req.actual_size(), req.waste());
706
707 if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
708 increase_humongous_waste(generation, wasted_bytes);
709 }
710 }
711 }
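// A worked example of the rules above (numbers assumed): a mutator TLAB request
// whose satisfied size is A bytes and which wasted W bytes by retiring a region
// increases the generation's allocation counter by A + W, increases usage (for the
// generation and, mirrored, for global) by A only, and notifies the pacer of both
// the actual words and the wasted words. Humongous waste is only tallied when the
// request is large enough to require humongous regions.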
712
713 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
714 generation->increase_humongous_waste(bytes);
715 if (!generation->is_global()) {
716 global_generation()->increase_humongous_waste(bytes);
717 }
718 }
719
720 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
721 generation->decrease_humongous_waste(bytes);
722 if (!generation->is_global()) {
723 global_generation()->decrease_humongous_waste(bytes);
724 }
725 }
726
727 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
728 generation->increase_used(bytes);
729 if (!generation->is_global()) {
730 global_generation()->increase_used(bytes);
731 }
732 }
733
734 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
735 generation->decrease_used(bytes);
736 if (!generation->is_global()) {
737 global_generation()->decrease_used(bytes);
738 }
739 }
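// Taken together, the helpers above keep the global generation's tallies covering
// the whole heap: every update against young or old is mirrored into global.
// Informally, assuming usage is only ever tracked against young and old,
// global->used() == young->used() + old->used() once concurrent updates settle.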
740
741 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
742 if (ShenandoahPacing) {
743 control_thread()->pacing_notify_alloc(words);
744 if (waste > 0) {
745 pacer()->claim_for_alloc<true>(waste);
746 }
747 }
748 }
749
750 size_t ShenandoahHeap::capacity() const {
751 return committed();
752 }
753
754 size_t ShenandoahHeap::max_capacity() const {
755 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
756 }
757
758 size_t ShenandoahHeap::soft_max_capacity() const {
759 size_t v = Atomic::load(&_soft_max_size);
760 assert(min_capacity() <= v && v <= max_capacity(),
761 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
762 min_capacity(), v, max_capacity());
763 return v;
764 }
765
855 size_t old_soft_max = soft_max_capacity();
856 if (new_soft_max != old_soft_max) {
857 new_soft_max = MAX2(min_capacity(), new_soft_max);
858 new_soft_max = MIN2(max_capacity(), new_soft_max);
859 if (new_soft_max != old_soft_max) {
860 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
861 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
862 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
863 );
864 set_soft_max_capacity(new_soft_max);
865 return true;
866 }
867 }
868 return false;
869 }
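// Sketch of the clamping above (capacities assumed): with min_capacity() = 256m and
// max_capacity() = 4g, a requested soft max of 8g is clamped to 4g and a request of
// 128m is raised to 256m; if the clamped value equals the current soft max, nothing
// is logged and false is returned.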
870
871 void ShenandoahHeap::notify_heap_changed() {
872 // Update monitoring counters when we took a new region. This amortizes the
873 // update costs on slow path.
874 monitoring_support()->notify_heap_changed();
875 _heap_changed.set();
876 }
877
878 void ShenandoahHeap::set_forced_counters_update(bool value) {
879 monitoring_support()->set_forced_counters_update(value);
880 }
881
882 void ShenandoahHeap::handle_force_counters_update() {
883 monitoring_support()->handle_force_counters_update();
884 }
885
886 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
887 // New object should fit the GCLAB size
888 size_t min_size = MAX2(size, PLAB::min_size());
889
890 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
891 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
892
893 new_size = MIN2(new_size, PLAB::max_size());
894 new_size = MAX2(new_size, PLAB::min_size());
895
896 // Record new heuristic value even if we take any shortcut. This captures
897 // the case when moderately-sized objects always take a shortcut. At some point,
898 // heuristics should catch up with them.
899 log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
900 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
901
902 if (new_size < size) {
903 // New size still does not fit the object. Fall back to shared allocation.
904 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
905 log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
906 return nullptr;
907 }
908
909 // Retire current GCLAB, and allocate a new one.
910 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
911 gclab->retire();
912
913 size_t actual_size = 0;
914 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
915 if (gclab_buf == nullptr) {
916 return nullptr;
917 }
918
919 assert (size <= actual_size, "allocation should fit");
920
921 // ...and clear or zap just allocated TLAB, if needed.
922 if (ZeroTLAB) {
923 Copy::zero_to_words(gclab_buf, actual_size);
924 } else if (ZapTLAB) {
925 // Skip mangling the space corresponding to the object header to
926 // ensure that the returned space is not considered parsable by
927 // any concurrent GC thread.
928 size_t hdr_size = oopDesc::header_size();
929 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
930 }
931 gclab->set_buf(gclab_buf, actual_size);
932 return gclab->allocate(size);
933 }
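// The resize policy above, in brief: double the previous GCLAB size, clamp it into
// [PLAB::min_size(), PLAB::max_size()], and record the result even when taking a
// shortcut. For example (sizes assumed), a thread with a 4k-word GCLAB evacuating a
// 3k-word object gets a fresh 8k-word GCLAB; if the doubled-and-clamped size still
// cannot fit the object, the method returns nullptr without retiring the current
// GCLAB, and the caller falls back to a shared allocation.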
934
935 // Called from stubs in JIT code or interpreter
936 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
937 size_t requested_size,
938 size_t* actual_size) {
939 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
940 HeapWord* res = allocate_memory(req);
941 if (res != nullptr) {
942 *actual_size = req.actual_size();
943 } else {
944 *actual_size = 0;
945 }
946 return res;
947 }
948
949 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
950 size_t word_size,
951 size_t* actual_size) {
952 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
953 HeapWord* res = allocate_memory(req);
954 if (res != nullptr) {
955 *actual_size = req.actual_size();
964 bool in_new_region = false;
965 HeapWord* result = nullptr;
966
967 if (req.is_mutator_alloc()) {
968 if (ShenandoahPacing) {
969 pacer()->pace_for_alloc(req.size());
970 pacer_epoch = pacer()->epoch();
971 }
972
973 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
974 result = allocate_memory_under_lock(req, in_new_region);
975 }
976
977 // Check that gc overhead is not exceeded.
978 //
979 // Shenandoah will grind along for quite a while allocating one
980 // object at a time using shared (non-tlab) allocations. This check
981 // is testing that the GC overhead limit has not been exceeded.
982 // This will notify the collector to start a cycle, but will raise
983 // an OOME to the mutator if the last Full GCs have not made progress.
984 // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
985 if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
986 control_thread()->handle_alloc_failure(req, false);
987 req.set_actual_size(0);
988 return nullptr;
989 }
990
991 if (result == nullptr) {
992 // Block until control thread reacted, then retry allocation.
993 //
994 // It might happen that a thread requesting an allocation unblocks well after the
995 // GC happened, only to fail the retried allocation because other threads have
996 // already depleted the free storage. In this case, a better strategy is to keep
997 // trying until at least one full GC has completed.
998 //
999 // Stop retrying and return nullptr, raising an OOM error, if the allocation failed even after:
1000 // a) We experienced a GC that had good progress, or
1001 // b) We experienced at least one Full GC (whether or not it had good progress)
1002
1003 size_t original_count = shenandoah_policy()->full_gc_count();
1004 while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
1005 control_thread()->handle_alloc_failure(req, true);
1006 result = allocate_memory_under_lock(req, in_new_region);
1007 }
1008 if (result != nullptr) {
1009 // If our allocation request has been satisfied after it initially failed, we count this as good GC progress
1010 notify_gc_progress();
1011 }
1012 if (log_develop_is_enabled(Debug, gc, alloc)) {
1013 ResourceMark rm;
1014 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
1015 ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1016 Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1017 original_count, get_gc_no_progress_count());
1018 }
1019 }
1020 } else {
1021 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1022 result = allocate_memory_under_lock(req, in_new_region);
1023 // Do not call handle_alloc_failure() here, because we cannot block.
1024 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1025 }
1026
1027 if (in_new_region) {
1028 notify_heap_changed();
1029 }
1030
1031 if (result == nullptr) {
1032 req.set_actual_size(0);
1033 }
1034
1035 // This is called regardless of the outcome of the allocation to account
1036 // for any waste created by retiring regions with this request.
1037 increase_used(req);
1038
1039 if (result != nullptr) {
1040 size_t requested = req.size();
1041 size_t actual = req.actual_size();
1042
1043 assert (req.is_lab_alloc() || (requested == actual),
1044 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1045 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1046
1047 if (req.is_mutator_alloc()) {
1048 // If we requested more than we were granted, give the rest back to pacer.
1049 // This only matters if we are in the same pacing epoch: do not try to unpace
1050 // over the budget for the other phase.
1051 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1052 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1053 }
1054 }
1055 }
1056
1057 return result;
1058 }
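// Summary of the mutator slow path above: pace the allocation if enabled, try once
// under the heap lock, fail fast with nullptr for non-LAB requests once the
// no-progress count exceeds ShenandoahNoProgressThreshold, and otherwise block on
// the control thread and retry until the allocation succeeds or a full GC completes
// without satisfying it.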
1059
1060 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1061 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1062 // We cannot block for safepoint for GC allocations, because there is a high chance
1063 // we are already running at safepoint or from stack watermark machinery, and we cannot
1064 // block again.
1065 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1066
1067 // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1068 if (req.is_old() && !old_generation()->can_allocate(req)) {
1069 return nullptr;
1070 }
1071
1072 // If TLAB request size is greater than available, allocate() will attempt to downsize request to fit within available
1073 // memory.
1074 HeapWord* result = _free_set->allocate(req, in_new_region);
1075
1076 // Record the plab configuration for this result and register the object.
1077 if (result != nullptr && req.is_old()) {
1078 old_generation()->configure_plab_for_current_thread(req);
1079 if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1080 // Register the newly allocated object while we're holding the global lock since there's no synchronization
1081 // built in to the implementation of register_object(). There are potential races when multiple independent
1082 // threads are allocating objects, some of which might span the same card region. For example, consider
1083 // a card table's memory region within which three objects are being allocated by three different threads:
1084 //
1085 // objects being "concurrently" allocated:
1086 // [-----a------][-----b-----][--------------c------------------]
1087 // [---- card table memory range --------------]
1088 //
1089 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1090 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1091 // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1092 // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1093 // card region.
1094 //
1095 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1096 // last-start representing object b while first-start represents object c. This is why we need to require all
1097 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1098 old_generation()->card_scan()->register_object(result);
1099 }
1100 }
1101
1102 return result;
1103 }
1104
1105 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1106 bool* gc_overhead_limit_was_exceeded) {
1107 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1108 return allocate_memory(req);
1109 }
1110
1111 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1112 size_t size,
1113 Metaspace::MetadataType mdtype) {
1114 MetaWord* result;
1115
1116 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1117 ShenandoahHeuristics* h = global_generation()->heuristics();
1118 if (h->can_unload_classes()) {
1119 h->record_metaspace_oom();
1120 }
1121
1122 // Expand and retry allocation
1123 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1124 if (result != nullptr) {
1125 return result;
1126 }
1127
1128 // Start full GC
1129 collect(GCCause::_metadata_GC_clear_soft_refs);
1130
1131 // Retry allocation
1132 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1133 if (result != nullptr) {
1134 return result;
1135 }
1136
1137 // Expand and retry allocation
1138 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1196 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1197 _sh->marked_object_iterate(r, &cl);
1198
1199 if (ShenandoahPacing) {
1200 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1201 }
1202
1203 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1204 break;
1205 }
1206 }
1207 }
1208 };
1209
1210 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1211 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1212 workers()->run_task(&task);
1213 }
1214
1215 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1216 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1217 if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1218 // This thread went through the OOM during evac protocol. It is safe to return
1219 // the forward pointer. It must not attempt to evacuate any other objects.
1220 return ShenandoahBarrierSet::resolve_forwarded(p);
1221 }
1222
1223 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1224
1225 ShenandoahHeapRegion* r = heap_region_containing(p);
1226 assert(!r->is_humongous(), "never evacuate humongous objects");
1227
1228 ShenandoahAffiliation target_gen = r->affiliation();
1229 return try_evacuate_object(p, thread, r, target_gen);
1230 }
1231
1232 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1233 ShenandoahAffiliation target_gen) {
1234 assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1235 assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1236 bool alloc_from_lab = true;
1237 HeapWord* copy = nullptr;
1238 size_t size = p->size();
1239
1240 #ifdef ASSERT
1241 if (ShenandoahOOMDuringEvacALot &&
1242 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1243 copy = nullptr;
1244 } else {
1245 #endif
1246 if (UseTLAB) {
1247 copy = allocate_from_gclab(thread, size);
1248 }
1249 if (copy == nullptr) {
1250 // If we failed to allocate in LAB, we'll try a shared allocation.
1251 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1252 copy = allocate_memory(req);
1253 alloc_from_lab = false;
1254 }
1255 #ifdef ASSERT
1256 }
1257 #endif
1258
1259 if (copy == nullptr) {
1260 control_thread()->handle_alloc_failure_evac(size);
1261
1262 _oom_evac_handler.handle_out_of_memory_during_evacuation();
1263
1264 return ShenandoahBarrierSet::resolve_forwarded(p);
1265 }
1266
1267 // Copy the object:
1268 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1269
1270 // Try to install the new forwarding pointer.
1271 oop copy_val = cast_to_oop(copy);
1272 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1273 if (result == copy_val) {
1274 // Successfully evacuated. Our copy is now the public one!
1275 ContinuationGCSupport::relativize_stack_chunk(copy_val);
1276 shenandoah_assert_correct(nullptr, copy_val);
1277 return copy_val;
1278 } else {
1279 // Failed to evacuate. We need to deal with the object that is left behind. Since this
1280 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1281 // But if it happens to contain references to evacuated regions, those references would
1282 // not get updated for this stale copy during this cycle, and we will crash while scanning
1283 // it the next cycle.
1284 if (alloc_from_lab) {
1285 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1286 // object will overwrite this stale copy, or the filler object on LAB retirement will
1287 // do this.
1288 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1289 } else {
1290 // For non-LAB allocations, we have no way to retract the allocation, and
1291 // have to explicitly overwrite the copy with the filler object. With that overwrite,
1292 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1293 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1294 fill_with_object(copy, size);
1295 shenandoah_assert_correct(nullptr, copy_val);
1296 // For non-LAB allocations, the object has already been registered
1297 }
1298 shenandoah_assert_correct(nullptr, result);
1299 return result;
1300 }
1301 }
1302
1303 void ShenandoahHeap::trash_cset_regions() {
1304 ShenandoahHeapLocker locker(lock());
1305
1306 ShenandoahCollectionSet* set = collection_set();
1307 ShenandoahHeapRegion* r;
1308 set->clear_current_index();
1309 while ((r = set->next()) != nullptr) {
1310 r->make_trash();
1311 }
1312 collection_set()->clear();
1313 }
1314
1315 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1316 st->print_cr("Heap Regions:");
1317 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1318 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1319 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1320 st->print_cr("UWM=update watermark, U=used");
1321 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1322 st->print_cr("S=shared allocs, L=live data");
1323 st->print_cr("CP=critical pins");
1324
1325 for (size_t i = 0; i < num_regions(); i++) {
1326 get_region(i)->print_on(st);
1327 }
1328 }
1329
1330 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1331 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1332
1333 oop humongous_obj = cast_to_oop(start->bottom());
1334 size_t size = humongous_obj->size();
1335 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1336 size_t index = start->index() + required_regions - 1;
1337
1338 assert(!start->has_live(), "liveness must be zero");
1339
1340 for(size_t i = 0; i < required_regions; i++) {
1341 // Reclaim from the tail. Otherwise, an assertion fails when printing a region to the trace log,
1342 // as printing expects every humongous continuation to follow a humongous start region.
1343 ShenandoahHeapRegion* region = get_region(index--);
1344
1345 assert(region->is_humongous(), "expect correct humongous start or continuation");
1346 assert(!region->is_cset(), "Humongous region should not be in collection set");
1347
1348 region->make_trash_immediate();
1349 }
1350 return required_regions;
1351 }
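// Example of the region math above (sizes assumed): a 5m humongous object with a
// 2m region size needs required_regions = 3; regions start+2, start+1 and start are
// trashed in that order, and 3 is returned to the caller.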
1352
1353 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1354 public:
1355 ShenandoahCheckCleanGCLABClosure() {}
1356 void do_thread(Thread* thread) {
1357 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1358 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1359 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1360
1361 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1362 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1363 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1364 assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1365 }
1366 }
1367 };
1368
1369 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1370 private:
1371 bool const _resize;
1372 public:
1373 ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1374 void do_thread(Thread* thread) {
1375 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1376 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1377 gclab->retire();
1378 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1379 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1380 }
1381
1382 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1383 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1384 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1385
1386 // There are two reasons to retire all plabs between old-gen evacuation passes.
1387 // 1. We need to make the plab memory parsable by remembered-set scanning.
1388 // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1389 ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1390 if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1391 ShenandoahThreadLocalData::set_plab_size(thread, 0);
1392 }
1393 }
1394 }
1395 };
1396
1397 void ShenandoahHeap::labs_make_parsable() {
1398 assert(UseTLAB, "Only call with UseTLAB");
1399
1400 ShenandoahRetireGCLABClosure cl(false);
1401
1402 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1403 ThreadLocalAllocBuffer& tlab = t->tlab();
1404 tlab.make_parsable();
1405 cl.do_thread(t);
1406 }
1407
1408 workers()->threads_do(&cl);
1409 }
1410
1411 void ShenandoahHeap::tlabs_retire(bool resize) {
1412 assert(UseTLAB, "Only call with UseTLAB");
1413 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1507 }
1508
1509 void ShenandoahHeap::print_tracing_info() const {
1510 LogTarget(Info, gc, stats) lt;
1511 if (lt.is_enabled()) {
1512 ResourceMark rm;
1513 LogStream ls(lt);
1514
1515 phase_timings()->print_global_on(&ls);
1516
1517 ls.cr();
1518 ls.cr();
1519
1520 shenandoah_policy()->print_gc_stats(&ls);
1521
1522 ls.cr();
1523 ls.cr();
1524 }
1525 }
1526
1527 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1528 shenandoah_assert_control_or_vm_thread_at_safepoint();
1529 _gc_generation = generation;
1530 }
1531
1532 // Active generation may only be set by the VM thread at a safepoint.
1533 void ShenandoahHeap::set_active_generation() {
1534 assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1535 assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1536 assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1537 _active_generation = _gc_generation;
1538 }
1539
1540 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1541 shenandoah_policy()->record_collection_cause(cause);
1542
1543 assert(gc_cause() == GCCause::_no_gc, "Over-writing cause");
1544 assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1545
1546 set_gc_cause(cause);
1547 set_gc_generation(generation);
1548
1549 generation->heuristics()->record_cycle_start();
1550 }
1551
1552 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1553 assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1554 assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1555
1556 generation->heuristics()->record_cycle_end();
1557 if (mode()->is_generational() && generation->is_global()) {
1558 // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1559 young_generation()->heuristics()->record_cycle_end();
1560 old_generation()->heuristics()->record_cycle_end();
1561 }
1562
1563 set_gc_generation(nullptr);
1564 set_gc_cause(GCCause::_no_gc);
1565 }
1566
1567 void ShenandoahHeap::verify(VerifyOption vo) {
1568 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1569 if (ShenandoahVerify) {
1570 verifier()->verify_generic(vo);
1571 } else {
1572 // TODO: Consider allocating verification bitmaps on demand,
1573 // and turn this on unconditionally.
1574 }
1575 }
1576 }
1577 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1578 return _free_set->capacity();
1579 }
1580
1581 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1582 private:
1583 MarkBitMap* _bitmap;
1584 ShenandoahScanObjectStack* _oop_stack;
1585 ShenandoahHeap* const _heap;
1586 ShenandoahMarkingContext* const _marking_context;
1896 } else {
1897 heap_region_iterate(blk);
1898 }
1899 }
1900
1901 class ShenandoahRendezvousClosure : public HandshakeClosure {
1902 public:
1903 inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1904 inline void do_thread(Thread* thread) {}
1905 };
1906
1907 void ShenandoahHeap::rendezvous_threads(const char* name) {
1908 ShenandoahRendezvousClosure cl(name);
1909 Handshake::execute(&cl);
1910 }
1911
1912 void ShenandoahHeap::recycle_trash() {
1913 free_set()->recycle_trash();
1914 }
1915
1916 void ShenandoahHeap::do_class_unloading() {
1917 _unloader.unload();
1918 if (mode()->is_generational()) {
1919 old_generation()->set_parsable(false);
1920 }
1921 }
1922
1923 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1924 // Weak refs processing
1925 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1926 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1927 ShenandoahTimingsTracker t(phase);
1928 ShenandoahGCWorkerPhase worker_phase(phase);
1929 shenandoah_assert_generations_reconciled();
1930 gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
1931 }
1932
1933 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1934 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1935
1936 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1937 // make them parsable for update code to work correctly. Plus, we can compute new sizes
1938 // for future GCLABs here.
1939 if (UseTLAB) {
1940 ShenandoahGCPhase phase(concurrent ?
1941 ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1942 ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1943 gclabs_retire(ResizeTLAB);
1944 }
1945
1946 _update_refs_iterator.reset();
1947 }
1948
1949 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1950 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1951 if (_gc_state_changed) {
1952 _gc_state_changed = false;
1953 char state = gc_state();
1954 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1955 ShenandoahThreadLocalData::set_gc_state(t, state);
1956 }
1957 }
1958 }
1959
1960 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1961 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1962 _gc_state.set_cond(mask, value);
1963 _gc_state_changed = true;
1964 // Check that if concurrent weak root processing is in progress, the active generation is not null
1965 assert(!is_concurrent_weak_root_in_progress() || active_generation() != nullptr, "Error");
1966 shenandoah_assert_generations_reconciled();
1967 }
1968
1969 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
1970 uint mask;
1971 assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
1972 if (!in_progress && is_concurrent_old_mark_in_progress()) {
1973 assert(mode()->is_generational(), "Only generational GC has old marking");
1974 assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
1975 // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
1976 mask = YOUNG_MARKING;
1977 } else {
1978 mask = MARKING | YOUNG_MARKING;
1979 }
1980 set_gc_state(mask, in_progress);
1981 manage_satb_barrier(in_progress);
1982 }
1983
1984 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
1985 #ifdef ASSERT
1986 // has_forwarded_objects() iff UPDATEREFS or EVACUATION
1987 bool has_forwarded = has_forwarded_objects();
1988 bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
1989 bool evacuating = _gc_state.is_set(EVACUATION);
1990 assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
1991 "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
1992 #endif
1993 if (!in_progress && is_concurrent_young_mark_in_progress()) {
1994 // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
1995 assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
1996 set_gc_state(OLD_MARKING, in_progress);
1997 } else {
1998 set_gc_state(MARKING | OLD_MARKING, in_progress);
1999 }
2000 manage_satb_barrier(in_progress);
2001 }
2002
2003 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2004 return old_generation()->is_preparing_for_mark();
2005 }
2006
void ShenandoahHeap::manage_satb_barrier(bool active) {
  if (is_concurrent_mark_in_progress()) {
    // Ignore request to deactivate barrier while concurrent mark is in progress.
    // Do not attempt to re-activate the barrier if it is already active.
    if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
      ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
    }
  } else {
    // No concurrent marking is in progress, so honor the request to deactivate,
    // but only if the barrier is currently active.
    if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
      ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
    }
  }
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

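// Note: the thread argument is ignored. Shenandoah does not track TLAB usage per thread;
// it reports the free set's used bytes as an approximation instead.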
size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

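// Atomically flip the cancellation flag from CANCELLABLE to CANCELLED. Returns true only
// for the single caller that wins the race, so cancellation bookkeeping runs exactly once.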
bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

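// Cancel marking in every generation and abandon partially filled SATB buffers;
// whatever progress marking has made is discarded along with the cycle.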
void ShenandoahHeap::cancel_concurrent_mark() {
  if (mode()->is_generational()) {
    young_generation()->cancel_marking();
    old_generation()->cancel_marking();
  }

  global_generation()->cancel_marking();

  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
    _cancel_requested_time = os::elapsedTime();
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until GC worker exits normally.
  control_thread()->stop();
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

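// Exported address of the biased collection-set membership map. Barrier code can test
// whether an oop is in the collection set with a single byte load off this address.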
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  if (mode()->is_generational()) {
    young_generation()->reset_bytes_allocated_since_gc_start();
    old_generation()->reset_bytes_allocated_since_gc_start();
  }

  global_generation()->reset_bytes_allocated_since_gc_start();
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert(is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahNMethod::attach_gc_data(nm);
  ShenandoahCodeRoots::register_nmethod(nm);
}

// Reconcile the "pinned" state of every active region with its current pin count,
// under the heap lock: regions whose pin count dropped to zero are unpinned, and
// regions that acquired a pin count are pinned.
void ShenandoahHeap::sync_pinned_region_status() {
  ShenandoahHeapLocker locker(lock());

  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    shenandoah_assert_generations_reconciled();
    if (gc_generation()->contains(r)) {
      assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
             "Region " SIZE_FORMAT " pinning status is inconsistent", i);
    }
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

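// Arm the concurrent root processing flags at the safepoint. Strong roots only need a
// concurrent pass when there is a collection set to evacuate; weak roots always need a
// pass to clear out dead referents.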
void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  if (unload_classes()) {
    _unloader.finish();
  }
}

#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    // Use ParallelGCThreads inside safepoints
    assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
           ParallelGCThreads, nworkers);
  } else {
    // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
           ConcGCThreads, nworkers);
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert(_verifier != nullptr, "sanity");
  return _verifier;
}

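// Worker task for the update-references phase. Each worker claims regions from the shared
// iterator and walks live objects up to the region's update watermark, rewriting stale
// references to point at the to-space copies. The CONCURRENT template parameter selects
// either the concurrent closure (with suspendible-thread-set cooperation) or the STW
// closure used by degenerated cycles, without branching per object.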
template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously
      // reserved to hold the results of evacuation. These reserves are no longer necessary
      // because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // At the end of GC, we will have cset_regions newly evacuated, fully empty regions from
      // which we will be able to replenish the Collector free set and the OldCollector free set
      // in preparation for the next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // In the !CONCURRENT (degenerated) case there is no value in expanding the Mutator free set.
    T cl;
    ShenandoahHeapRegion* r = _regions->next();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
        if (ShenandoahPacing) {
          _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
        }
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

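// Run the update-references pass. The concurrent/STW decision is baked into the task via
// its template argument, so each worker executes a specialized loop.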
void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}

ShenandoahSynchronizePinnedRegionStates::ShenandoahSynchronizePinnedRegionStates() :
  _lock(ShenandoahHeap::heap()->lock()) { }

void ShenandoahSynchronizePinnedRegionStates::heap_region_do(ShenandoahHeapRegion* r) {
  // Drop "pinned" state from regions that no longer have a pin count. Put
  // regions with a pin count into the "pinned" state.
  if (r->is_active()) {
    if (r->is_pinned()) {
      if (r->pin_count() == 0) {
        ShenandoahHeapLocker locker(_lock);
        r->make_unpinned();
      }
    } else {
      if (r->pin_count() > 0) {
        ShenandoahHeapLocker locker(_lock);
        r->make_pinned();
      }
    }
  }
}

void ShenandoahHeap::update_heap_region_states(bool concurrent) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);

    final_update_refs_update_region_states();

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
    trash_cset_regions();
  }
}

void ShenandoahHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates cl;
  parallel_heap_region_iterate(&cl);
}

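// Rebuild the free set once the collection set has been retired. In generational mode this
// first rebalances memory between the young and old generations, then rebuilds the free set
// with the adjusted sizes, and finally lets the old generation heuristics decide whether to
// trigger old marking or mixed collections.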
void ShenandoahHeap::rebuild_free_set(bool concurrent) {
  ShenandoahGCPhase phase(concurrent ?
                          ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                          ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
  ShenandoahHeapLocker locker(lock());
  size_t young_cset_regions, old_cset_regions;
  size_t first_old_region, last_old_region, old_region_count;
  _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
  // If there are no old regions, first_old_region will be greater than last_old_region
  assert((first_old_region > last_old_region) ||
         ((last_old_region + 1 - first_old_region >= old_region_count) &&
          get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
         "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
         old_region_count, first_old_region, last_old_region);

  if (mode()->is_generational()) {
#ifdef ASSERT
    if (ShenandoahVerify) {
      verifier()->verify_before_rebuilding_free_set();
    }
#endif

    // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so we
    // consider all of it available for transfer to old. Note that transfer of humongous regions
    // does not impact available.
    ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
    size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
    gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);

    // Total old_available may have been expanded to hold anticipated promotions. We trigger if the
    // fragmented available memory represents more than 16 regions worth of data. Note that fragmentation
    // may increase when we promote regular regions in place, because many of those regions still have an
    // abundant amount of available memory within them. Fragmentation will decrease as promote-by-copy
    // consumes the available memory within these partially consumed regions.
    //
    // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory
    // that resides within partially consumed regions. These triggers are evaluated by the old generation
    // heuristics after the rebuild, in evaluate_triggers() below.
  }
  // Rebuild free set based on adjusted generation sizes.
  _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);

  if (mode()->is_generational()) {
    ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
    ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
    old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
  }
}

void ShenandoahHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();
  print_heap_regions_on(st);
}

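// The marking bitmap is committed in slices, each covering _bitmap_regions_per_slice regions.
// A slice may only be uncommitted when no region it covers is committed; skip_self lets a
// region ask whether the slice is still needed by any of its neighbors.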
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->index() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert(g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->index()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }
  }
  return false;
}

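// Serviceability support: a single memory pool is exposed through both the concurrent-cycle
// and the stop-the-world memory managers.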
void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return MemoryUsage(_initial_size, used(), committed(), max_capacity());
}

ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
  // Nothing to do here, except checking that the loaded archive space looks fine.
#ifdef ASSERT
  HeapWord* start = archive_space.start();
  HeapWord* end = archive_space.end();

  // Walk the archived objects; a fully used archive space ends exactly at the range end.
  HeapWord* cur = start;
  while (cur < end) {
    cur += cast_to_oop(cur)->size();
  }

  // No unclaimed tail at the end of archive space.
  assert(cur == end,
         "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
         p2i(cur), p2i(end));

  // Region bounds are good.
  ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
  ShenandoahHeapRegion* end_reg = heap_region_containing(end);
  assert(begin_reg->is_regular(), "Must be");
  assert(end_reg->is_regular(), "Must be");
  assert(begin_reg->bottom() == start,
         "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
         p2i(start), p2i(begin_reg->bottom()));
  assert(end_reg->top() == end,
         "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
         p2i(end), p2i(end_reg->top()));
#endif
}

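// Map an affiliation to the generation that owns it. Non-generational mode routes
// everything through the single global generation.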
ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
  if (!mode()->is_generational()) {
    return global_generation();
  } else if (affiliation == YOUNG_GENERATION) {
    return young_generation();
  } else if (affiliation == OLD_GENERATION) {
    return old_generation();
  }

  ShouldNotReachHere();
  return nullptr;
}

void ShenandoahHeap::log_heap_status(const char* msg) const {
  if (mode()->is_generational()) {
    young_generation()->log_status(msg);
    old_generation()->log_status(msg);
  } else {
    global_generation()->log_status(msg);
  }
}