/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

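// Pre-touches the committed heap regions in parallel, so the OS backs them
// with physical memory before the mutator starts. Regions are handed out
// via the shared region iterator, one region per claim.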
class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
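
// One-time heap initialization: sizes the region array from the ergonomic
// flags, reserves and commits the heap, marking bitmaps and region storage,
// places the collection set map, builds the region objects and free set, and
// finally wires up pacing, monitoring, and the control thread.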
jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

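  // Commit and uncommit the bitmap page-by-page. If one bitmap page covers
  // several regions, group those regions into a single page-sized slice; if a
  // region needs more than one page of bitmap, the slice is that region's
  // bitmap, a whole multiple of the page size. For example, with 8 KB of
  // bitmap per region and 2 MB bitmap pages, each slice spans 256 regions.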
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
    align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
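  // The collection set map keeps one byte per region. A "biased" base address
  // is precomputed so that a cset check compiles down to a single indexed
  // load, map[addr >> region_shift], without subtracting the heap base first.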
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into huge ones. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their job
    // simpler by pre-touching the contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

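  // Per-worker liveness caches: each marking worker accumulates per-region
  // live data locally and flushes it to the regions later, avoiding contended
  // atomic updates on the hot marking path.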
  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = nullptr;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

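// Clears the marking bitmap for all regions whose bitmap slice is committed,
// and flips the marking context to "incomplete" so stale marks are not used.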
void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())               st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())      st->print("marking, ");
  if (is_evacuation_in_progress())           st->print("evacuating, ");
  if (is_update_refs_in_progress())          st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
  if (is_full_gc_in_progress())              st->print("full gc, ");
  if (is_full_gc_move_in_progress())         st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != nullptr) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (null)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because they cannot
  // determine their max_size yet. Instead, let the WorkerThreads initialize
  // the GCLAB whenever a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  if (is_in_reserved(p)) {
    if (is_full_gc_move_in_progress()) {
      // Full GC move is running, we do not have consistent region
      // information yet. But we know the pointer is in heap.
      return true;
    }
    // Now check if we point to a live section in active region.
    ShenandoahHeapRegion* r = heap_region_containing(p);
    return (r->is_active() && p < r->top());
  } else {
    return false;
  }
}

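// Uncommits empty regions to give memory back to the OS. Only regions that
// have been empty since before "shrink_before" are eligible, and committed
// memory is never reduced below "shrink_until".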
void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that applications
  // could enjoy the near committed regions. GC allocations are much less frequent,
  // and therefore can accept the committing costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert (size <= actual_size, "allocation should fit");

  // ...and clear or zap just allocated TLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Allocation failed, block until control thread reacted, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress (or until at least
    // one full GC has completed).
    size_t original_count = shenandoah_policy()->full_gc_count();
    while (result == nullptr
        && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with mutator allocation, then we may need to block for safepoint.
  // We cannot block for safepoint for GC allocations, because there is a high chance
  // we are already running at safepoint or from stack watermark machinery, and we cannot
  // block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing a region
    // to the trace log, as it expects every region to belong to a humongous run
    // that starts with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

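// Makes all thread-local allocation buffers parsable for heap walkers:
// mutator TLABs are made parsable in place, and GCLABs are retired outright.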
void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != nullptr) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  // Return the max allowed size, and let the allocation path
  // figure out the safe size for current allocation.
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
  }
  return nullptr;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  tcl->do_thread(_control_thread);
  workers()->threads_do(tcl);
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread* thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;
  const uint active_workers = workers()->active_workers();
  const size_t n_regions = num_regions();
  size_t stride = ShenandoahParallelRegionStride;
  if (stride == 0 && active_workers > 1) {
    // Automatically derive the stride to balance the work between threads
    // evenly. Do not try to split work if below the reasonable threshold.
    constexpr size_t threshold = 4096;
    stride = n_regions <= threshold ?
             threshold :
             (n_regions + active_workers - 1) / active_workers;
  }

  if (n_regions > stride && active_workers > 1) {
    ShenandoahParallelHeapRegionTask task(blk, stride);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

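// An empty handshake: it forces every Java thread through a handshake
// operation, which guarantees all threads have observed the updated global
// state before the GC proceeds.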
class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads(const char* name) {
  ShenandoahRendezvousClosure cl(name);
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happened in the meantime.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
      HeapWord* tams = _ctx->top_at_mark_start(r);
      HeapWord* top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set; make sure it knows about
      // the current pinning status. This also allows trashing more regions
      // whose pinning status has since been dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember limit for updating refs. It's guaranteed that we get no
      // from-space-refs written from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(lock());
    _collection_set->clear();
    heuristics()->choose_collection_set(_collection_set);
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
  // make them parsable for update code to work correctly. Plus, we can compute new sizes
  // for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

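// The global gc state is cached in each Java thread's thread-local storage
// for cheap barrier checks. It may only be republished at a safepoint, where
// all Java threads are stopped and cannot race with the update.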
void ShenandoahHeap::propagate_gc_state_to_java_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    _gc_state_changed = false;
    char state = gc_state();
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
      ShenandoahThreadLocalData::set_gc_state(t, state);
    }
  }
}

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

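// Atomically flips _cancelled_gc from CANCELLABLE to CANCELLED. Only the one
// thread that wins the CAS observes the transition and returns true; all
// others see the cancellation already in flight.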
bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until GC worker exits normally.
  control_thread()->stop();
}

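// Unloads classes at a STW pause (degenerated or full GC): unlinks dead
// classes and nmethods with the GC workers, then purges the class loader
// data graph and resizes metaspace.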
void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;
  ClassUnloadingContext ctx(_workers->active_workers(),
                            true /* unregister_nmethods_during_purge */,
                            false /* lock_codeblob_free_separately */);

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahPhaseTimings::Phase phase = full_gc ?
                                          ShenandoahPhaseTimings::full_gc_purge_class_unload :
                                          ShenandoahPhaseTimings::degen_gc_purge_class_unload;
    ShenandoahIsAliveSelector is_alive;
    {
      CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
      ShenandoahGCPhase gc_phase(phase);
      ShenandoahGCWorkerPhase worker_phase(phase);
      bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      uint num_workers = _workers->active_workers();
      ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
      _workers->run_task(&unlink_task);
    }
    // Release unloaded nmethods' memory.
    ClassUnloadingContext::context()->purge_and_free_nmethods();
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::degen_gc_purge_cldg);
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  DEBUG_ONLY(MetaspaceUtils::verify();)
}


// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
  uint num_workers = _workers->active_workers();
  ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
                                               ShenandoahPhaseTimings::full_gc_purge_weak_par :
                                               ShenandoahPhaseTimings::degen_gc_purge_weak_par;
  ShenandoahGCPhase phase(timing_phase);
  ShenandoahGCWorkerPhase worker_phase(timing_phase);
  // Cleanup weak roots
  if (has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahUpdateRefsClosure keep_alive;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
      cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
    _workers->run_task(&cleaning_task);
  } else {
    ShenandoahIsAliveClosure is_alive;
#ifdef ASSERT
    ShenandoahAssertNotForwardedClosure verify_cl;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
      cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
#else
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
      cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
#endif
    _workers->run_task(&cleaning_task);
  }
}


void ShenandoahHeap::parallel_cleaning(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
  ShenandoahGCPhase phase(full_gc ?
                          ShenandoahPhaseTimings::full_gc_purge :
                          ShenandoahPhaseTimings::degen_gc_purge);
  stw_weak_refs(full_gc);
  stw_process_weak_roots(full_gc);
  stw_unload_classes(full_gc);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapRegion* r = heap_region_containing(o);
  assert(r != nullptr, "Sanity");
  assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
  r->record_unpin();
}
1945 
1946 void ShenandoahHeap::sync_pinned_region_status() {
1947 ShenandoahHeapLocker locker(lock());
1948 
1949 for (size_t i = 0; i < num_regions(); i++) {
1950 ShenandoahHeapRegion *r = get_region(i);
1951 if (r->is_active()) {
1952 if (r->is_pinned()) {
1953 if (r->pin_count() == 0) {
1954 r->make_unpinned();
1955 }
1956 } else {
1957 if (r->pin_count() > 0) {
1958 r->make_pinned();
1959 }
1960 }
1961 }
1962 }
1963
1964 assert_pinned_region_status();
1965 }
1966
1967 #ifdef ASSERT
1968 void ShenandoahHeap::assert_pinned_region_status() {
1969 for (size_t i = 0; i < num_regions(); i++) {
1970 ShenandoahHeapRegion* r = get_region(i);
1971 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1972 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1973 }
1974 }
1975 #endif
1976
1977 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1978 return _gc_timer;
1979 }
1980
1981 void ShenandoahHeap::prepare_concurrent_roots() {
1982 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1983 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1984 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1985 set_concurrent_weak_root_in_progress(true);
1986 if (unload_classes()) {
1987 _unloader.prepare();
1988 }
1989 }
1990
1991 void ShenandoahHeap::finish_concurrent_roots() {
1992 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1993 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1994 if (unload_classes()) {
1995 _unloader.finish();
1996 }
1997 }
1998
1999 #ifdef ASSERT
2000 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2001 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2002
2003 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2004 if (UseDynamicNumberOfGCThreads) {
2005 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2006 } else {
2007 // Use ParallelGCThreads inside safepoints
2008 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2009 }
2010 } else {
2011 if (UseDynamicNumberOfGCThreads) {
2012 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2013 } else {
2014 // Use ConcGCThreads outside safepoints
2015 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2016 }
2017 }
2018 }
2019 #endif
2020
2021 ShenandoahVerifier* ShenandoahHeap::verifier() {
2022 guarantee(ShenandoahVerify, "Should be enabled");
2023 assert (_verifier != nullptr, "sanity");
2024 return _verifier;
2025 }
2026
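// The update-refs task below is specialized at compile time on CONCURRENT rather
// than branching at runtime: the concurrent instantiation joins the suspendible
// thread set and uses the concurrent closure, while the STW instantiation runs a
// plain parallel worker session. A sketch of how the two instantiations are
// selected (mirrors update_heap_references() further down):
//
//   ShenandoahUpdateHeapRefsTask<true>  conc_task(&iter);  // concurrent phase
//   ShenandoahUpdateHeapRefsTask<false> stw_task(&iter);   // degenerated (STW) phase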
2027 template<bool CONCURRENT>
2028 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2029 private:
2030 ShenandoahHeap* _heap;
2031 ShenandoahRegionIterator* _regions;
2032 public:
2033 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2034 WorkerTask("Shenandoah Update References"),
2035 _heap(ShenandoahHeap::heap()),
2036 _regions(regions) {
2037 }
2038
2039 void work(uint worker_id) {
2040 if (CONCURRENT) {
2041 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2042 ShenandoahSuspendibleThreadSetJoiner stsj;
2043 do_work<ShenandoahConcUpdateRefsClosure>();
2044 } else {
2045 ShenandoahParallelWorkerSession worker_session(worker_id);
2046 do_work<ShenandoahSTWUpdateRefsClosure>();
2047 }
2048 }
2049
2050 private:
2051 template<class T>
2052 void do_work() {
2053 T cl;
2054 ShenandoahHeapRegion* r = _regions->next();
2055 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2056 while (r != nullptr) {
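// get_update_watermark() captures the region's top() as of evacuation start;
// objects above the watermark were allocated during evacuation and already
// point into to-space, so only [bottom, update_watermark) needs ref updates.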
2057 HeapWord* update_watermark = r->get_update_watermark();
2058 assert (update_watermark >= r->bottom(), "sanity");
2059 if (r->is_active() && !r->is_cset()) {
2060 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2061 }
2062 if (ShenandoahPacing) {
2063 _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2064 }
2065 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2066 return;
2067 }
2068 r = _regions->next();
2069 }
2070 }
2071 };
2072
2073 void ShenandoahHeap::update_heap_references(bool concurrent) {
2074 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2075
2076 if (concurrent) {
2077 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2078 workers()->run_task(&task);
2079 } else {
2080 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2081 workers()->run_task(&task);
2082 }
2083 }
2084
2085
2086 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2087 private:
2088 ShenandoahHeapLock* const _lock;
2089
2090 public:
2091 ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2092
2093 void heap_region_do(ShenandoahHeapRegion* r) {
2094 // Drop the unnecessary "pinned" state from regions that do not have CP marks
2095 // anymore, as this allows trashing them.
2096
2097 if (r->is_active()) {
2098 if (r->is_pinned()) {
2099 if (r->pin_count() == 0) {
2100 ShenandoahHeapLocker locker(_lock);
2101 r->make_unpinned();
2102 }
2103 } else {
2104 if (r->pin_count() > 0) {
2105 ShenandoahHeapLocker locker(_lock);
2106 r->make_pinned();
2107 }
2108 }
2109 }
2110 }
2111
2112 bool is_thread_safe() { return true; }
2113 };
2114
2115 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2116 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2117 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2118
2119 {
2120 ShenandoahGCPhase phase(concurrent ?
2121 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2122 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2123 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2124 parallel_heap_region_iterate(&cl);
2125
2126 assert_pinned_region_status();
2127 }
2128
2129 {
2130 ShenandoahGCPhase phase(concurrent ?
2131 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2132 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2133 trash_cset_regions();
2134 }
2135 }
2136
2137 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2138 {
2139 ShenandoahGCPhase phase(concurrent ?
2140 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2141 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2142 ShenandoahHeapLocker locker(lock());
2143 _free_set->rebuild();
2144 }
2145 }
2146
2147 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2148 print_on(st);
2149 st->cr();
2150 print_heap_regions_on(st);
2151 }
2152
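// The mark bitmap is committed and uncommitted in page-sized "slices". When the
// bitmap page size exceeds the per-region bitmap footprint, one slice covers the
// bitmaps of several adjacent regions, so commit/uncommit decisions must consult
// all regions sharing the slice (hence the skip_self scan below). For example,
// assuming 2 MiB bitmap pages and 64 KiB of bitmap per region, a slice spans
// 32 regions and can only be uncommitted once all 32 regions are uncommitted.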
2153 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2154 size_t slice = r->index() / _bitmap_regions_per_slice;
2155
2156 size_t regions_from = _bitmap_regions_per_slice * slice;
2157 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2158 for (size_t g = regions_from; g < regions_to; g++) {
2159 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2160 if (skip_self && g == r->index()) continue;
2161 if (get_region(g)->is_committed()) {
2162 return true;
2163 }
2189 return false;
2190 }
2191
2192 if (AlwaysPreTouch) {
2193 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2194 }
2195
2196 return true;
2197 }
2198
2199 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2200 shenandoah_assert_heaplocked();
2201
2202 // Bitmaps in special regions do not need uncommits
2203 if (_bitmap_region_special) {
2204 return true;
2205 }
2206
2207 if (is_bitmap_slice_committed(r, true)) {
2208 // Some other region from the group is still committed, meaning the bitmap
2209 // slice should stay committed; exit right away.
2210 return true;
2211 }
2212
2213 // Uncommit the bitmap slice:
2214 size_t slice = r->index() / _bitmap_regions_per_slice;
2215 size_t off = _bitmap_bytes_per_slice * slice;
2216 size_t len = _bitmap_bytes_per_slice;
2217 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2218 return false;
2219 }
2220 return true;
2221 }
2222
2223 void ShenandoahHeap::safepoint_synchronize_begin() {
2224 SuspendibleThreadSet::synchronize();
2225 }
2226
2227 void ShenandoahHeap::safepoint_synchronize_end() {
2228 SuspendibleThreadSet::desynchronize();
2229 }
2230
2231 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2232 static const char *msg = "Concurrent uncommit";
2233 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2234 EventMark em("%s", msg);
2235
2236 op_uncommit(shrink_before, shrink_until);
2237 }
2238
2239 void ShenandoahHeap::try_inject_alloc_failure() {
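// With -XX:+ShenandoahAllocFailureALot, roughly 5% of eligible calls
// (os::random() % 1000 landing in [951, 999]) set the injection flag, which a
// subsequent mutator allocation observes via should_inject_alloc_failure().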
2240 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2241 _inject_alloc_failure.set();
2242 os::naked_short_sleep(1);
2243 if (cancelled_gc()) {
2244 log_info(gc)("Allocation failure was successfully injected");
2245 }
2246 }
2247 }
2248
2249 bool ShenandoahHeap::should_inject_alloc_failure() {
2250 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2251 }
2252
2253 void ShenandoahHeap::initialize_serviceability() {
2254 _memory_pool = new ShenandoahMemoryPool(this);
2255 _cycle_memory_manager.add_pool(_memory_pool);
2256 _stw_memory_manager.add_pool(_memory_pool);
2257 }
2258
2259 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2260 GrowableArray<GCMemoryManager*> memory_managers(2);
2261 memory_managers.append(&_cycle_memory_manager);
2262 memory_managers.append(&_stw_memory_manager);
2263 return memory_managers;
2264 }
2265
2266 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2267 GrowableArray<MemoryPool*> memory_pools(1);
2268 memory_pools.append(_memory_pool);
2269 return memory_pools;
2270 }
2271
2272 MemoryUsage ShenandoahHeap::memory_usage() {
2273 return _memory_pool->get_memory_usage();
2274 }
2275
2276 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2277 _heap(ShenandoahHeap::heap()),
2278 _index(0) {}
2279
2280 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2281 _heap(heap),
2282 _index(0) {}
2283
2284 void ShenandoahRegionIterator::reset() {
2285 _index = 0;
2286 }
2287
2288 bool ShenandoahRegionIterator::has_next() const {
2289 return _index < _heap->num_regions();
2290 }
2291
2292 char ShenandoahHeap::gc_state() const {
2293 return _gc_state.raw_value();
2294 }
2295
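// Each worker accumulates per-region live data in its own liveness cache and
// flushes it to the region counters once per cycle; this keeps the hot marking
// path free of updates to shared region state. Usage sketch (assumed call
// pattern, matching the two functions below):
//
//   ShenandoahLiveData* ld = heap->get_liveness_cache(worker_id); // starts zeroed
//   ld[region_index] += live_words;                               // cheap, thread-local
//   heap->flush_liveness_cache(worker_id);                        // publish and re-zero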
2296 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2297 #ifdef ASSERT
2298 assert(_liveness_cache != nullptr, "sanity");
2299 assert(worker_id < _max_workers, "sanity");
2300 for (uint i = 0; i < num_regions(); i++) {
2301 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2302 }
2303 #endif
2304 return _liveness_cache[worker_id];
2305 }
2306
2307 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2308 assert(worker_id < _max_workers, "sanity");
2309 assert(_liveness_cache != nullptr, "sanity");
2310 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2311 for (uint i = 0; i < num_regions(); i++) {
2312 ShenandoahLiveData live = ld[i];
2313 if (live > 0) {
2314 ShenandoahHeapRegion* r = get_region(i);
2315 r->increase_live_data_gc_words(live);
2316 ld[i] = 0;
2317 }
2318 }
2319 }
2320
2321 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2322 if (is_idle()) return false;
2323
2324 // Objects allocated after marking starts are implicitly alive and don't need
2325 // any barriers during the marking phase.
2326 if (is_concurrent_mark_in_progress() &&
2327 !marking_context()->allocated_after_mark_start(obj)) {
2328 return true;
2329 }
2330
2331 // Cannot guarantee that obj is deeply good, i.e. that nothing it references is still forwarded.
2332 if (has_forwarded_objects()) {
2333 return true;
2334 }
2335
2336 return false;
2337 }
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "memory/allocation.hpp"
29 #include "memory/universe.hpp"
30
31 #include "gc/shared/classUnloadingContext.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/locationPrinter.inline.hpp"
36 #include "gc/shared/memAllocator.hpp"
37 #include "gc/shared/plab.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39
40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
49 #include "gc/shenandoah/shenandoahControlThread.hpp"
50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
56 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
57 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
58 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
59 #include "gc/shenandoah/shenandoahInitLogger.hpp"
60 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
61 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
64 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
65 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
66 #include "gc/shenandoah/shenandoahPadding.hpp"
67 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
68 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
69 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
70 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
71 #include "gc/shenandoah/shenandoahSTWMark.hpp"
72 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
73 #include "gc/shenandoah/shenandoahUtils.hpp"
74 #include "gc/shenandoah/shenandoahVerifier.hpp"
75 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
76 #include "gc/shenandoah/shenandoahVMOperations.hpp"
77 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
78 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
79 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
80 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
81 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
82 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
83 #include "utilities/globalDefinitions.hpp"
84
85 #if INCLUDE_JFR
86 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
87 #endif
88
89 #include "classfile/systemDictionary.hpp"
90 #include "code/codeCache.hpp"
91 #include "memory/classLoaderMetaspace.hpp"
92 #include "memory/metaspaceUtils.hpp"
93 #include "oops/compressedOops.inline.hpp"
94 #include "prims/jvmtiTagMap.hpp"
95 #include "runtime/atomic.hpp"
96 #include "runtime/globals.hpp"
97 #include "runtime/interfaceSupport.inline.hpp"
98 #include "runtime/java.hpp"
99 #include "runtime/orderAccess.hpp"
100 #include "runtime/safepointMechanism.hpp"
101 #include "runtime/threads.hpp"
102 #include "runtime/vmThread.hpp"
103 #include "services/mallocTracker.hpp"
104 #include "services/memTracker.hpp"
105 #include "utilities/events.hpp"
106 #include "utilities/powerOfTwo.hpp"
107
108 class ShenandoahPretouchHeapTask : public WorkerTask {
109 private:
110 ShenandoahRegionIterator _regions;
111 const size_t _page_size;
112 public:
113 ShenandoahPretouchHeapTask(size_t page_size) :
114 WorkerTask("Shenandoah Pretouch Heap"),
115 _page_size(page_size) {}
116
117 virtual void work(uint worker_id) {
118 ShenandoahHeapRegion* r = _regions.next();
119 while (r != nullptr) {
120 if (r->is_committed()) {
121 os::pretouch_memory(r->bottom(), r->end(), _page_size);
122 }
123 r = _regions.next();
124 }
125 }
126 };
127 
128 class ShenandoahPretouchBitmapTask : public WorkerTask {
129 private:
130 ShenandoahRegionIterator _regions;
131 char* _bitmap_base;
132 const size_t _bitmap_size;
133 const size_t _page_size;
134 public:
135 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
136 WorkerTask("Shenandoah Pretouch Bitmap"),
137 _bitmap_base(bitmap_base),
138 _bitmap_size(bitmap_size),
139 _page_size(page_size) {}
140 
141 virtual void work(uint worker_id) {
142 ShenandoahHeapRegion* r = _regions.next();
143 while (r != nullptr) {
144 size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / ShenandoahMarkBitMap::heap_map_factor();
145 size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / ShenandoahMarkBitMap::heap_map_factor();
146 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
147 
148 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
149 
150 r = _regions.next();
151 }
152 }
153 };
154 
157 jint ShenandoahHeap::initialize() {
158 //
159 // Figure out heap sizing
160 //
161
162 size_t init_byte_size = InitialHeapSize;
163 size_t min_byte_size = MinHeapSize;
164 size_t max_byte_size = MaxHeapSize;
165 size_t heap_alignment = HeapAlignment;
166
167 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
168
169 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
170 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
171
172 _num_regions = ShenandoahHeapRegion::region_count();
173 assert(_num_regions == (max_byte_size / reg_size_bytes),
174 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
175 _num_regions, max_byte_size, reg_size_bytes);
176
177 size_t num_committed_regions = init_byte_size / reg_size_bytes;
178 num_committed_regions = MIN2(num_committed_regions, _num_regions);
179 assert(num_committed_regions <= _num_regions, "sanity");
180 _initial_size = num_committed_regions * reg_size_bytes;
181
182 size_t num_min_regions = min_byte_size / reg_size_bytes;
183 num_min_regions = MIN2(num_min_regions, _num_regions);
184 assert(num_min_regions <= _num_regions, "sanity");
185 _minimum_size = num_min_regions * reg_size_bytes;
186
187 // Default to max heap size.
188 _soft_max_size = _num_regions * reg_size_bytes;
189
190 _committed = _initial_size;
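// Worked example (illustrative numbers, not defaults): with -Xms1g -Xmx4g and
// 2 MiB regions, _num_regions = 2048 and num_committed_regions = 512, so
// _initial_size = 1 GiB is committed up front and _soft_max_size starts at 4 GiB.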
191
192 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
193 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
194 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
195
196 //
197 // Reserve and commit memory for heap
198 //
199
200 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
201 initialize_reserved_region(heap_rs);
202 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
203 _heap_region_special = heap_rs.special();
204
205 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
206 "Misaligned heap: " PTR_FORMAT, p2i(base()));
207 os::trace_page_sizes_for_requested_size("Heap",
208 max_byte_size, heap_rs.page_size(), heap_alignment,
209 heap_rs.base(), heap_rs.size());
210
211 #if SHENANDOAH_OPTIMIZED_MARKTASK
212 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
213 // Fail if we ever attempt to address more than we can.
214 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
215 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
216 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
217 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
218 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
219 vm_exit_during_initialization("Fatal Error", buf);
220 }
221 #endif
222
223 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
224 if (!_heap_region_special) {
225 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
226 "Cannot commit heap memory");
227 }
228
229 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
230
231 // Now we know the number of regions and heap sizes, initialize the heuristics.
232 initialize_heuristics();
233
234 assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
235
236 //
237 // Worker threads must be initialized after the barrier is configured
238 //
239 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
240 if (_workers == nullptr) {
241 vm_exit_during_initialization("Failed necessary allocation.");
242 } else {
243 _workers->initialize_workers();
244 }
245
246 if (ParallelGCThreads > 1) {
247 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
248 _safepoint_workers->initialize_workers();
249 }
250
251 //
252 // Reserve and commit memory for bitmap(s)
253 //
254
255 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
256 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
257
258 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
259
260 guarantee(bitmap_bytes_per_region != 0,
261 "Bitmap bytes per region should not be zero");
262 guarantee(is_power_of_2(bitmap_bytes_per_region),
263 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
264
265 if (bitmap_page_size > bitmap_bytes_per_region) {
266 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
267 _bitmap_bytes_per_slice = bitmap_page_size;
268 } else {
269 _bitmap_regions_per_slice = 1;
270 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
271 }
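// Example of the two cases above (illustrative sizes): with 2 MiB bitmap pages
// and 64 KiB of bitmap per region, one slice is the 2 MiB page covering 32
// regions; with 4 KiB pages and 64 KiB per region, each region gets its own
// 64 KiB slice.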
272
273 guarantee(_bitmap_regions_per_slice >= 1,
274 "Should have at least one region per slice: " SIZE_FORMAT,
275 _bitmap_regions_per_slice);
276
277 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
278 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
279 _bitmap_bytes_per_slice, bitmap_page_size);
280
281 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
282 os::trace_page_sizes_for_requested_size("Mark Bitmap",
283 bitmap_size_orig, bitmap.page_size(), bitmap_page_size,
284 bitmap.base(),
285 bitmap.size());
286 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
287 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
288 _bitmap_region_special = bitmap.special();
289
290 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
291 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
292 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
293 if (!_bitmap_region_special) {
294 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
295 "Cannot commit bitmap memory");
296 }
297
298 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
299
300 if (ShenandoahVerify) {
301 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
302 os::trace_page_sizes_for_requested_size("Verify Bitmap",
303 bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size,
304 verify_bitmap.base(),
305 verify_bitmap.size());
306 if (!verify_bitmap.special()) {
307 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
308 "Cannot commit verification bitmap memory");
309 }
310 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
311 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
312 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
313 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
314 }
315
316 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
317 size_t aux_bitmap_page_size = bitmap_page_size;
318
319 ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
320 os::trace_page_sizes_for_requested_size("Aux Bitmap",
321 bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size,
322 aux_bitmap.base(), aux_bitmap.size());
323 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
324 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
325 _aux_bitmap_region_special = aux_bitmap.special();
326 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
327
328 //
329 // Create regions and region sets
330 //
331 size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
332 size_t region_storage_size_orig = region_align * _num_regions;
333 size_t region_storage_size = align_up(region_storage_size_orig,
334 MAX2(region_page_size, os::vm_allocation_granularity()));
335
336 ReservedSpace region_storage(region_storage_size, region_page_size);
337 os::trace_page_sizes_for_requested_size("Region Storage",
338 region_storage_size_orig, region_storage.page_size(), region_page_size,
339 region_storage.base(), region_storage.size());
340 MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
341 if (!region_storage.special()) {
342 os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
343 "Cannot commit region memory");
344 }
345
346 // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
347 // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
348 // If not successful, bite the bullet and allocate at whatever address.
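// The probing below walks power-of-two addresses, e.g. 0x1000, 0x2000, ...,
// up to 1 << 30, requesting the cset map at each until a reservation succeeds;
// a low map base lets compiled code encode the biased map address compactly.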
349 {
350 const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
351 const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
352 const size_t cset_page_size = os::vm_page_size();
353
354 uintptr_t min = round_up_power_of_2(cset_align);
355 uintptr_t max = (1u << 30u);
356 ReservedSpace cset_rs;
357
358 for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
359 char* req_addr = (char*)addr;
360 assert(is_aligned(req_addr, cset_align), "Should be aligned");
361 cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
362 if (cset_rs.is_reserved()) {
363 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
364 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
365 break;
366 }
367 }
368
369 if (_collection_set == nullptr) {
370 cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
371 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
372 }
373 os::trace_page_sizes_for_requested_size("Collection Set",
374 cset_size, cset_rs.page_size(), cset_page_size,
375 cset_rs.base(),
376 cset_rs.size());
377 }
378
379 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
380 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
381 _free_set = new ShenandoahFreeSet(this, _num_regions);
382
383 {
384 ShenandoahHeapLocker locker(lock());
385
386 for (size_t i = 0; i < _num_regions; i++) {
387 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
388 bool is_committed = i < num_committed_regions;
389 void* loc = region_storage.base() + i * region_align;
390
391 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
392 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
393
394 _marking_context->initialize_top_at_mark_start(r);
395 _regions[i] = r;
396 assert(!collection_set()->is_in(i), "New region should not be in collection set");
397
398 _affiliations[i] = ShenandoahAffiliation::FREE;
399 }
400
401 size_t young_cset_regions, old_cset_regions;
402
403 // We are initializing the free set, so we ignore the cset region tallies.
404 size_t first_old, last_old, num_old;
405 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
406 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
407 }
408
409 if (AlwaysPreTouch) {
410 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
411 // before initialize() below zeroes it with the initializing thread. For any given region,
412 // we touch the region and the corresponding bitmaps from the same thread.
413 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
414
415 _pretouch_heap_page_size = heap_page_size;
416 _pretouch_bitmap_page_size = bitmap_page_size;
417
418 // OS memory managers may want to coalesce back-to-back pages. Make their jobs
419 // simpler by pre-touching continuous spaces (heap and bitmap) separately.
420
421 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
422 _workers->run_task(&bcl);
423
424 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
425 _workers->run_task(&hcl);
426 }
427
428 //
429 // Initialize the rest of GC subsystems
430 //
431
432 _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
433 for (uint worker = 0; worker < _max_workers; worker++) {
434 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
435 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
436 }
437
438 // There should probably be Shenandoah-specific options for these,
439 // just as there are G1-specific options.
440 {
441 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
442 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
443 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
444 }
445
446 _monitoring_support = new ShenandoahMonitoringSupport(this);
447 _phase_timings = new ShenandoahPhaseTimings(max_workers());
448 ShenandoahCodeRoots::initialize();
449
450 if (ShenandoahPacing) {
451 _pacer = new ShenandoahPacer(this);
452 _pacer->setup_for_idle();
453 }
454
455 initialize_controller();
456
457 if (ShenandoahUncommit) {
458 _uncommit_thread = new ShenandoahUncommitThread(this);
459 }
460
461 print_init_logger();
462
463 return JNI_OK;
464 }
465
466 void ShenandoahHeap::initialize_controller() {
467 _control_thread = new ShenandoahControlThread();
468 }
469
470 void ShenandoahHeap::print_init_logger() const {
471 ShenandoahInitLogger::print();
472 }
473
474 void ShenandoahHeap::initialize_mode() {
475 if (ShenandoahGCMode != nullptr) {
476 if (strcmp(ShenandoahGCMode, "satb") == 0) {
477 _gc_mode = new ShenandoahSATBMode();
478 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
479 _gc_mode = new ShenandoahPassiveMode();
480 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
481 _gc_mode = new ShenandoahGenerationalMode();
482 } else {
483 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
484 }
485 } else {
486 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
487 }
488 _gc_mode->initialize_flags();
489 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
490 vm_exit_during_initialization(
491 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
492 _gc_mode->name()));
493 }
494 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
495 vm_exit_during_initialization(
496 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
497 _gc_mode->name()));
498 }
499 }
500
501 void ShenandoahHeap::initialize_heuristics() {
502 _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
503 _global_generation->initialize_heuristics(mode());
504 }
505
506 #ifdef _MSC_VER
507 #pragma warning( push )
508 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
509 #endif
510
511 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
512 CollectedHeap(),
513 _gc_generation(nullptr),
514 _active_generation(nullptr),
515 _initial_size(0),
516 _committed(0),
517 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
518 _workers(nullptr),
519 _safepoint_workers(nullptr),
520 _heap_region_special(false),
521 _num_regions(0),
522 _regions(nullptr),
523 _affiliations(nullptr),
524 _gc_state_changed(false),
525 _gc_no_progress_count(0),
526 _cancel_requested_time(0),
527 _update_refs_iterator(this),
528 _global_generation(nullptr),
529 _control_thread(nullptr),
530 _uncommit_thread(nullptr),
531 _young_generation(nullptr),
532 _old_generation(nullptr),
533 _shenandoah_policy(policy),
534 _gc_mode(nullptr),
535 _free_set(nullptr),
536 _pacer(nullptr),
537 _verifier(nullptr),
538 _phase_timings(nullptr),
539 _monitoring_support(nullptr),
540 _memory_pool(nullptr),
541 _stw_memory_manager("Shenandoah Pauses"),
542 _cycle_memory_manager("Shenandoah Cycles"),
543 _gc_timer(new ConcurrentGCTimer()),
544 _soft_ref_policy(),
545 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
546 _marking_context(nullptr),
547 _bitmap_size(0),
548 _bitmap_regions_per_slice(0),
549 _bitmap_bytes_per_slice(0),
550 _bitmap_region_special(false),
551 _aux_bitmap_region_special(false),
552 _liveness_cache(nullptr),
553 _collection_set(nullptr)
554 {
555 // Initialize GC mode early, many subsequent initialization procedures depend on it
556 initialize_mode();
557 _cancelled_gc.set(GCCause::_no_gc);
558 }
559
560 #ifdef _MSC_VER
561 #pragma warning( pop )
562 #endif
563
564 void ShenandoahHeap::print_on(outputStream* st) const {
565 st->print_cr("Shenandoah Heap");
566 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
567 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
568 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
569 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
570 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
571 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
572 num_regions(),
573 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
574 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
575
576 st->print("Status: ");
577 if (has_forwarded_objects()) st->print("has forwarded objects, ");
578 if (!mode()->is_generational()) {
579 if (is_concurrent_mark_in_progress()) st->print("marking,");
580 } else {
581 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
582 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
583 }
584 if (is_evacuation_in_progress()) st->print("evacuating, ");
585 if (is_update_refs_in_progress()) st->print("updating refs, ");
586 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
587 if (is_full_gc_in_progress()) st->print("full gc, ");
588 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
589 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
590 if (is_concurrent_strong_root_in_progress() &&
591 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
592
593 if (cancelled_gc()) {
594 st->print("cancelled");
595 } else {
596 st->print("not cancelled");
597 }
598 st->cr();
599
600 st->print_cr("Reserved region:");
601 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
602 p2i(reserved_region().start()),
603 p2i(reserved_region().end()));
604 
605 ShenandoahCollectionSet* cset = collection_set();
606 st->print_cr("Collection set:");
607 if (cset != nullptr) {
608 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
609 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
610 } else {
611 st->print_cr(" (null)");
612 }
613
614 st->cr();
615 MetaspaceUtils::print_on(st);
616
617 if (Verbose) {
618 st->cr();
619 print_heap_regions_on(st);
620 }
621 }
622
623 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
624 public:
625 void do_thread(Thread* thread) {
626 assert(thread != nullptr, "Sanity");
627 ShenandoahThreadLocalData::initialize_gclab(thread);
628 }
629 };
630
631 void ShenandoahHeap::post_initialize() {
632 CollectedHeap::post_initialize();
633
634 // Schedule periodic task to report on gc thread CPU utilization
635 _mmu_tracker.initialize();
636
637 MutexLocker ml(Threads_lock);
638
639 ShenandoahInitWorkerGCLABClosure init_gclabs;
640 _workers->threads_do(&init_gclabs);
641
642 // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
643 // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
644 _workers->set_initialize_gclab();
645
646 // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
647 // during a concurrent evacuation phase.
648 if (_safepoint_workers != nullptr) {
649 _safepoint_workers->threads_do(&init_gclabs);
650 _safepoint_workers->set_initialize_gclab();
651 }
652
653 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
654 }
655
656 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
657 return _global_generation->heuristics();
658 }
659
660 size_t ShenandoahHeap::used() const {
661 return global_generation()->used();
662 }
663
664 size_t ShenandoahHeap::committed() const {
665 return Atomic::load(&_committed);
666 }
667
668 void ShenandoahHeap::increase_committed(size_t bytes) {
669 shenandoah_assert_heaplocked_or_safepoint();
670 _committed += bytes;
671 }
672
673 void ShenandoahHeap::decrease_committed(size_t bytes) {
674 shenandoah_assert_heaplocked_or_safepoint();
675 _committed -= bytes;
676 }
677
678 // For tracking usage based on allocations, it should be the case that:
679 // * The sum of regions::used == heap::used
680 // * The sum of a generation's regions::used == generation::used
681 // * The sum of a generation's humongous regions::free == generation::humongous_waste
682 // These invariants are checked by the verifier on GC safepoints.
683 //
684 // Additional notes:
685 // * When a mutator's allocation request causes a region to be retired, the
686 // free memory left in that region is considered waste. It does not contribute
687 // to the usage, but it _does_ contribute to allocation rate.
688 // * The bottom of a PLAB must be aligned on card size. In some cases this will
689 // require padding in front of the PLAB (a filler object). Because this padding
690 // is included in the region's used memory we include the padding in the usage
691 // accounting as waste.
692 // * Mutator allocations are used to compute an allocation rate. They are also
693 // sent to the Pacer for those purposes.
694 // * There are three sources of waste:
695 // 1. The padding used to align a PLAB on card size
696 // 2. Region's free is less than minimum TLAB size and is retired
697 // 3. The unused portion of memory in the last region of a humongous object
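// Worked example of waste source 2 (hypothetical numbers): a region with 3 KB
// free is asked for a 16 KB TLAB; the region is retired and its 3 KB remainder
// is recorded as waste. The remainder never becomes part of used(), but it does
// count toward the allocation rate observed by the pacer and heuristics.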
698 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
699 size_t actual_bytes = req.actual_size() * HeapWordSize;
700 size_t wasted_bytes = req.waste() * HeapWordSize;
701 ShenandoahGeneration* generation = generation_for(req.affiliation());
702
703 if (req.is_gc_alloc()) {
704 assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
705 increase_used(generation, actual_bytes + wasted_bytes);
706 } else {
707 assert(req.is_mutator_alloc(), "Expected mutator alloc here");
708 // padding and actual size both count towards allocation counter
709 generation->increase_allocated(actual_bytes + wasted_bytes);
710
711 // only actual size counts toward usage for mutator allocations
712 increase_used(generation, actual_bytes);
713
714 // notify pacer of both actual size and waste
715 notify_mutator_alloc_words(req.actual_size(), req.waste());
716
717 if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
718 increase_humongous_waste(generation, wasted_bytes);
719 }
720 }
721 }
722
723 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
724 generation->increase_humongous_waste(bytes);
725 if (!generation->is_global()) {
726 global_generation()->increase_humongous_waste(bytes);
727 }
728 }
729
730 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
731 generation->decrease_humongous_waste(bytes);
732 if (!generation->is_global()) {
733 global_generation()->decrease_humongous_waste(bytes);
734 }
735 }
736
737 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
738 generation->increase_used(bytes);
739 if (!generation->is_global()) {
740 global_generation()->increase_used(bytes);
741 }
742 }
743
744 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
745 generation->decrease_used(bytes);
746 if (!generation->is_global()) {
747 global_generation()->decrease_used(bytes);
748 }
749 }
750
751 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
752 if (ShenandoahPacing) {
753 control_thread()->pacing_notify_alloc(words);
754 if (waste > 0) {
755 pacer()->claim_for_alloc<true>(waste);
756 }
757 }
758 }
759
760 size_t ShenandoahHeap::capacity() const {
761 return committed();
762 }
763
764 size_t ShenandoahHeap::max_capacity() const {
765 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
766 }
767
768 size_t ShenandoahHeap::soft_max_capacity() const {
769 size_t v = Atomic::load(&_soft_max_size);
770 assert(min_capacity() <= v && v <= max_capacity(),
771 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
772 min_capacity(), v, max_capacity());
773 return v;
774 }
775
776 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
777 assert(min_capacity() <= v && v <= max_capacity(),
778 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
779 min_capacity(), v, max_capacity());
780 Atomic::store(&_soft_max_size, v);
781 }
782
783 size_t ShenandoahHeap::min_capacity() const {
784 return _minimum_size;
785 }
786
787 size_t ShenandoahHeap::initial_capacity() const {
788 return _initial_size;
789 }
790
791 bool ShenandoahHeap::is_in(const void* p) const {
792 if (!is_in_reserved(p)) {
793 return false;
794 }
795
796 if (is_full_gc_move_in_progress()) {
797 // Full GC move is running; we do not have consistent region
798 // information yet. But we know the pointer is in the heap.
799 return true;
800 }
801
802 // Now check if we point to a live section in active region.
803 const ShenandoahHeapRegion* r = heap_region_containing(p);
804 if (p >= r->top()) {
805 return false;
806 }
807
808 if (r->is_active()) {
809 return true;
810 }
811
812 // The region is trash, but won't be recycled until after concurrent weak
813 // roots. We also don't allow mutators to allocate from trash regions
814 // during weak roots. Concurrent class unloading may access unmarked oops
815 // in trash regions.
816 return r->is_trash() && is_concurrent_weak_root_in_progress();
817 }
818
819 void ShenandoahHeap::notify_soft_max_changed() {
820 if (_uncommit_thread != nullptr) {
821 _uncommit_thread->notify_soft_max_changed();
822 }
823 }
824
825 void ShenandoahHeap::notify_explicit_gc_requested() {
826 if (_uncommit_thread != nullptr) {
827 _uncommit_thread->notify_explicit_gc_requested();
828 }
829 }
830
831 bool ShenandoahHeap::check_soft_max_changed() {
832 size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
833 size_t old_soft_max = soft_max_capacity();
834 if (new_soft_max != old_soft_max) {
835 new_soft_max = MAX2(min_capacity(), new_soft_max);
836 new_soft_max = MIN2(max_capacity(), new_soft_max);
837 if (new_soft_max != old_soft_max) {
838 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
839 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
840 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
841 );
842 set_soft_max_capacity(new_soft_max);
843 return true;
844 }
845 }
846 return false;
847 }
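// SoftMaxHeapSize is a manageable flag, so it can change while the VM runs
// (for instance via jcmd <pid> VM.set_flag SoftMaxHeapSize <bytes>); the check
// above therefore re-reads it on each invocation and clamps it into
// [min_capacity(), max_capacity()] before adopting it.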
848
849 void ShenandoahHeap::notify_heap_changed() {
850 // Update monitoring counters when we take a new region. This amortizes the
851 // update costs on the slow path.
852 monitoring_support()->notify_heap_changed();
853 _heap_changed.try_set();
854 }
855
856 void ShenandoahHeap::set_forced_counters_update(bool value) {
857 monitoring_support()->set_forced_counters_update(value);
858 }
859
860 void ShenandoahHeap::handle_force_counters_update() {
861 monitoring_support()->handle_force_counters_update();
862 }
863
864 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
865 // New object should fit the GCLAB size
866 size_t min_size = MAX2(size, PLAB::min_size());
867
868 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
869 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
870
871 new_size = MIN2(new_size, PLAB::max_size());
872 new_size = MAX2(new_size, PLAB::min_size());
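// Example of the doubling heuristic (illustrative sizes): a thread whose GCLAB
// was 64 KW asks for 128 KW next time, clamped into [PLAB::min_size(), PLAB::max_size()].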
873
874 // Record new heuristic value even if we take any shortcut. This captures
875 // the case when moderately-sized objects always take a shortcut. At some point,
876 // heuristics should catch up with them.
877 log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
878 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
879
880 if (new_size < size) {
881 // New size still does not fit the object. Fall back to shared allocation.
882 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
883 log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
884 return nullptr;
885 }
886
887 // Retire current GCLAB, and allocate a new one.
888 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
889 gclab->retire();
890
891 size_t actual_size = 0;
892 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
893 if (gclab_buf == nullptr) {
894 return nullptr;
895 }
896
897 assert (size <= actual_size, "allocation should fit");
898
899 // ...and clear or zap the just-allocated GCLAB, if needed.
900 if (ZeroTLAB) {
901 Copy::zero_to_words(gclab_buf, actual_size);
902 } else if (ZapTLAB) {
903 // Skip mangling the space corresponding to the object header to
904 // ensure that the returned space is not considered parsable by
905 // any concurrent GC thread.
906 size_t hdr_size = oopDesc::header_size();
907 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
908 }
909 gclab->set_buf(gclab_buf, actual_size);
910 return gclab->allocate(size);
911 }
912
913 // Called from stubs in JIT code or interpreter
914 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
915 size_t requested_size,
916 size_t* actual_size) {
917 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
918 HeapWord* res = allocate_memory(req);
919 if (res != nullptr) {
920 *actual_size = req.actual_size();
921 } else {
922 *actual_size = 0;
923 }
924 return res;
925 }
926
927 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
928 size_t word_size,
929 size_t* actual_size) {
930 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
931 HeapWord* res = allocate_memory(req);
932 if (res != nullptr) {
933 *actual_size = req.actual_size();
934 } else {
935 *actual_size = 0;
936 }
937 return res;
938 }
939
940 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
941 intptr_t pacer_epoch = 0;
942 bool in_new_region = false;
943 HeapWord* result = nullptr;
944
945 if (req.is_mutator_alloc()) {
946 if (ShenandoahPacing) {
947 pacer()->pace_for_alloc(req.size());
948 pacer_epoch = pacer()->epoch();
949 }
950
951 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
952 result = allocate_memory_under_lock(req, in_new_region);
953 }
954
955 // Check that gc overhead is not exceeded.
956 //
957 // Shenandoah will grind along for quite a while allocating one
958 // object at a time using shared (non-tlab) allocations. This check
959 // is testing that the GC overhead limit has not been exceeded.
960 // This will notify the collector to start a cycle, but will raise
961 // an OOME to the mutator if the last Full GCs have not made progress.
962 // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
963 if ((result == nullptr) && !req.is_lab_alloc() && (get_gc_no_progress_count() > ShenandoahNoProgressThreshold)) {
964 control_thread()->handle_alloc_failure(req, false);
965 req.set_actual_size(0);
966 return nullptr;
967 }
968
969 if (result == nullptr) {
970 // Block until control thread reacted, then retry allocation.
971 //
972 // It might happen that one of the threads requesting allocation would unblock
973 // way later after GC happened, only to fail the second allocation, because
974 // other threads have already depleted the free storage. In this case, a better
975 // strategy is to try again, until at least one full GC has completed.
976 //
977 // Stop retrying and return nullptr (raising an OutOfMemoryError) if the allocation still fails even after:
978 // a) We experienced a GC that had good progress, or
979 // b) We experienced at least one Full GC (whether or not it had good progress)
980
981 const size_t original_count = shenandoah_policy()->full_gc_count();
982 while (result == nullptr && should_retry_allocation(original_count)) {
983 control_thread()->handle_alloc_failure(req, true);
984 result = allocate_memory_under_lock(req, in_new_region);
985 }
986 if (result != nullptr) {
987 // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
988 notify_gc_progress();
989 }
990 if (log_develop_is_enabled(Debug, gc, alloc)) {
991 ResourceMark rm;
992 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
993 ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
994 Thread::current()->name(), p2i(result), req.type_string(), req.size(),
995 original_count, get_gc_no_progress_count());
996 }
997 }
998 } else {
999 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1000 result = allocate_memory_under_lock(req, in_new_region);
1001 // Do not call handle_alloc_failure() here, because we cannot block.
1002 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1003 }
1004
1005 if (in_new_region) {
1006 notify_heap_changed();
1007 }
1008
1009 if (result == nullptr) {
1010 req.set_actual_size(0);
1011 }
1012
1013 // This is called regardless of the outcome of the allocation to account
1014 // for any waste created by retiring regions with this request.
1015 increase_used(req);
1016
1017 if (result != nullptr) {
1018 size_t requested = req.size();
1019 size_t actual = req.actual_size();
1020
1021 assert (req.is_lab_alloc() || (requested == actual),
1022 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1023 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1024
1025 if (req.is_mutator_alloc()) {
1026 // If we requested more than we were granted, give the rest back to pacer.
1027 // This only matters if we are in the same pacing epoch: do not try to unpace
1028 // over the budget for the other phase.
1029 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1030 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1031 }
1032 }
1033 }
1034
1035 return result;
1036 }
1037
1038 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
1039 return shenandoah_policy()->full_gc_count() == original_full_gc_count
1040 && !shenandoah_policy()->is_at_shutdown();
1041 }
1042
1043 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1044 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1045 // We cannot block for safepoint for GC allocations, because there is a high chance
1046 // we are already running at safepoint or from stack watermark machinery, and we cannot
1047 // block again.
1048 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1049
1050 // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1051 if (req.is_old() && !old_generation()->can_allocate(req)) {
1052 return nullptr;
1053 }
1054
1055 // If the TLAB request size is greater than the available memory, allocate() will
1056 // attempt to downsize the request to fit within the available memory.
1057 HeapWord* result = _free_set->allocate(req, in_new_region);
1058
1059 // Record the plab configuration for this result and register the object.
1060 if (result != nullptr && req.is_old()) {
1061 old_generation()->configure_plab_for_current_thread(req);
1062 if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1063 // Register the newly allocated object while we're holding the global lock since there's no synchronization
1064 // built in to the implementation of register_object(). There are potential races when multiple independent
1065 // threads are allocating objects, some of which might span the same card region. For example, consider
1066 // a card table's memory region within which three objects are being allocated by three different threads:
1067 //
1068 // objects being "concurrently" allocated:
1069 // [-----a------][-----b-----][--------------c------------------]
1070 // [---- card table memory range --------------]
1071 //
1072 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1073 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1074 // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1075 // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1076 // card region.
1077 //
1078 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1079 // last-start representing object b while first-start represents object c. This is why we need to require all
1080 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1081 old_generation()->card_scan()->register_object(result);
1082 }
1083 }
1084
1085 return result;
1086 }
1087
1088 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1089 bool* gc_overhead_limit_was_exceeded) {
1090 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1091 return allocate_memory(req);
1092 }
1093
1094 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1095 size_t size,
1096 Metaspace::MetadataType mdtype) {
1097 MetaWord* result;
1098
1099 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1100 ShenandoahHeuristics* h = global_generation()->heuristics();
1101 if (h->can_unload_classes()) {
1102 h->record_metaspace_oom();
1103 }
1104
1105 // Expand and retry allocation
1106 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1107 if (result != nullptr) {
1108 return result;
1109 }
1110
1111 // Start full GC
1112 collect(GCCause::_metadata_GC_clear_soft_refs);
1113
1114 // Retry allocation
1115 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1116 if (result != nullptr) {
1117 return result;
1118 }
1119
1120 // Expand and retry allocation
1121 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1122 if (result != nullptr) {
1123 return result;
1124 }
1125 
1126 // Out of memory
1127 return nullptr;
1128 }
1129 
1130 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1131 private:
1132 ShenandoahHeap* const _heap;
1133 Thread* const _thread;
1134 public:
1135 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1136 _heap(heap), _thread(Thread::current()) {}
1137 
1138 void do_object(oop p) {
1139 shenandoah_assert_marked(nullptr, p);
1140 if (!p->is_forwarded()) {
1141 _heap->evacuate_object(p, _thread);
1142 }
1143 }
1144 };
1145 
1146 class ShenandoahEvacuationTask : public WorkerTask {
1147 private:
1148 ShenandoahHeap* const _sh;
1149 ShenandoahCollectionSet* const _cs;
1150 bool _concurrent;
1151 public:
1152 ShenandoahEvacuationTask(ShenandoahHeap* sh,
1153 ShenandoahCollectionSet* cs,
1154 bool concurrent) :
1155 WorkerTask("Shenandoah Evacuation"),
1156 _sh(sh),
1157 _cs(cs),
1158 _concurrent(concurrent)
1159 {}
1160 
1161 void work(uint worker_id) {
1162 if (_concurrent) {
1163 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1164 ShenandoahSuspendibleThreadSetJoiner stsj;
1165 ShenandoahEvacOOMScope oom_evac_scope;
1166 do_work();
1167 } else {
1168 ShenandoahParallelWorkerSession worker_session(worker_id);
1169 ShenandoahEvacOOMScope oom_evac_scope;
1170 do_work();
1171 }
1172 }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

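// Retires a thread's GCLAB (and, in generational mode, its PLAB). When _resize
// is set, the recorded LAB size is zeroed so that a fresh size is computed on
// the next allocation.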
class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) override {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }

    if (ShenandoahHeap::heap()->mode()->is_generational()) {
      PLAB* plab = ShenandoahThreadLocalData::plab(thread);
      assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());

      // There are two reasons to retire all PLABs between old-gen evacuation passes:
      //  1. We need to make the PLAB memory parsable by remembered-set scanning.
      //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
      ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
      if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
        ShenandoahThreadLocalData::set_plab_size(thread, 0);
      }
    }
  }
};

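// Copies the global gc state into a thread's local gc-state byte. Doubles as a
// handshake closure for Java threads and a plain thread closure for GC/VM threads.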
class ShenandoahGCStatePropagator : public HandshakeClosure {
public:
  explicit ShenandoahGCStatePropagator(char gc_state) :
    HandshakeClosure("Shenandoah GC State Change"),
    _gc_state(gc_state) {}

  void do_thread(Thread* thread) override {
    ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
  }
private:
  char _gc_state;
};

class ShenandoahPrepareForUpdateRefs : public HandshakeClosure {
public:
  explicit ShenandoahPrepareForUpdateRefs(char gc_state) :
    HandshakeClosure("Shenandoah Prepare for Update Refs"),
    _retire(ResizeTLAB), _propagator(gc_state) {}

  void do_thread(Thread* thread) override {
    _propagator.do_thread(thread);
    if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
      _retire.do_thread(thread);
    }
  }
private:
  ShenandoahRetireGCLABClosure _retire;
  ShenandoahGCStatePropagator _propagator;
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::concurrent_prepare_for_update_refs() {
  {
    // Java threads take this lock while they are being attached and added to the list of threads.
    // If another thread holds this lock before we update the gc state, it will receive a stale
    // gc state, but it will have been added to the list of Java threads and so will be corrected
    // by the following handshake.
    MutexLocker lock(Threads_lock);

    // A cancellation at this point means the degenerated cycle must resume from update-refs.
    set_gc_state_concurrent(EVACUATION, false);
    set_gc_state_concurrent(WEAK_ROOTS, false);
    set_gc_state_concurrent(UPDATE_REFS, true);
  }

  // This will propagate the gc state and retire gclabs and plabs for threads that require it.
  ShenandoahPrepareForUpdateRefs prepare_for_update_refs(_gc_state.raw_value());

  // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
  Threads::non_java_threads_do(&prepare_for_update_refs);

  // Now retire gclabs and plabs and propagate gc_state for mutator threads
  Handshake::execute(&prepare_for_update_refs);

  _update_refs_iterator.reset();
}

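// Chains two handshake closures so that both run against each thread in a single
// handshake, instead of paying for two separate thread iterations. Used by
// concurrent_final_roots() below to combine gc-state propagation with a
// caller-supplied closure.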
class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
  HandshakeClosure* _handshake_1;
  HandshakeClosure* _handshake_2;
public:
  ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
    HandshakeClosure(handshake_2->name()),
    _handshake_1(handshake_1), _handshake_2(handshake_2) {}

  void do_thread(Thread* thread) override {
    _handshake_1->do_thread(thread);
    _handshake_2->do_thread(thread);
  }
};

void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
  {
    assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
    MutexLocker lock(Threads_lock);
    set_gc_state_concurrent(WEAK_ROOTS, false);
  }

  ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
  Threads::non_java_threads_do(&propagator);
  if (handshake_closure == nullptr) {
    Handshake::execute(&propagator);
  } else {
    ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
    Handshake::execute(&composite);
  }
}

oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol. It is safe to return
    // the forward pointer. It must not attempt to evacuate any other objects.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahAffiliation target_gen = r->affiliation();
  return try_evacuate_object(p, thread, r, target_gen);
}

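// Slow path of evacuation: copies the object into the thread's GCLAB (or into a
// shared gc allocation if the LAB attempt fails) and then races to install the
// forwarding pointer. Exactly one thread wins the race; losers roll back or fill
// their unused copy.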
oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                        ShenandoahAffiliation target_gen) {
  assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
  assert(from_region->is_young(), "Only expect evacuations from young in this mode");
  bool alloc_from_lab = true;
  HeapWord* copy = nullptr;
  size_t size = p->size();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
      copy = allocate_memory(req);
      alloc_from_lab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    ContinuationGCSupport::relativize_stack_chunk(copy_val);
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

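// Reclaims all regions backing a dead humongous object, starting from its head
// region. Returns the number of regions that were turned into trash.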
size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing a region to the trace log,
    // because it expects every humongous continuation to follow a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
  return required_regions;
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");

    if (ShenandoahHeap::heap()->mode()->is_generational()) {
      PLAB* plab = ShenandoahThreadLocalData::plab(thread);
      assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
      assert(plab->words_remaining() == 0, "PLAB should not need retirement");
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);

  if (safepoint_workers() != nullptr) {
    safepoint_workers()->threads_do(&cl);
  }
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);

  if (safepoint_workers() != nullptr) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  // Return the max allowed size, and let the allocation path
  // figure out the safe size for current allocation.
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These requests are ignored because we can't easily have Shenandoah jump into
  // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
  // cycle. Instead, the caller is expected to run (and wait for) a concurrent cycle.
  assert(cause == GCCause::_wb_breakpoint, "Only one request right now");
  log_info(gc)("Ignoring request to collect_as_vm_thread");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != nullptr) {
    return r->block_start(addr);
  }
  return nullptr;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (_shenandoah_policy->is_at_shutdown()) {
    return;
  }

  if (_control_thread != nullptr) {
    tcl->do_thread(_control_thread);
  }

  if (_uncommit_thread != nullptr) {
    tcl->do_thread(_uncommit_thread);
  }

  workers()->threads_do(tcl);
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
  shenandoah_assert_control_or_vm_thread_at_safepoint();
  _gc_generation = generation;
}

// Active generation may only be set by the VM thread at a safepoint.
void ShenandoahHeap::set_active_generation() {
  assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
  assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
  assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
  _active_generation = _gc_generation;
}

void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
  shenandoah_policy()->record_collection_cause(cause);

  const GCCause::Cause current = gc_cause();
  assert(current == GCCause::_no_gc, "Overwriting cause: %s, with: %s",
         GCCause::to_string(current), GCCause::to_string(cause));
  assert(_gc_generation == nullptr, "Overwriting _gc_generation");

  set_gc_cause(cause);
  set_gc_generation(generation);

  generation->heuristics()->record_cycle_start();
}

void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
  assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
  assert(_gc_generation != nullptr, "_gc_generation wasn't set");

  generation->heuristics()->record_cycle_end();
  if (mode()->is_generational() && generation->is_global()) {
    // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
    young_generation()->heuristics()->record_cycle_end();
    old_generation()->heuristics()->record_cycle_end();
  }

  set_gc_generation(nullptr);
  set_gc_cause(GCCause::_no_gc);
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread* thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;
  const uint active_workers = workers()->active_workers();
  const size_t n_regions = num_regions();
  size_t stride = ShenandoahParallelRegionStride;
  if (stride == 0 && active_workers > 1) {
    // Automatically derive the stride to balance the work between threads
    // evenly. Do not try to split work if below the reasonable threshold.
    constexpr size_t threshold = 4096;
    stride = n_regions <= threshold ?
             threshold :
             (n_regions + active_workers - 1) / active_workers;
  }

  if (n_regions > stride && active_workers > 1) {
    ShenandoahParallelHeapRegionTask task(blk, stride);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

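// A no-op handshake closure: the point of executing it is not the closure body but
// the handshake itself. Once the handshake completes, every Java thread is known to
// have passed through a safepoint check, so earlier global state changes are visible
// to all of them.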
class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads(const char* name) {
  ShenandoahRendezvousClosure cl(name);
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
  if (mode()->is_generational()) {
    old_generation()->set_parsable(false);
  }
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  shenandoah_assert_generations_reconciled();
  gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
  // make them parsable for update code to work correctly. Plus, we can compute new sizes
  // for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

void ShenandoahHeap::propagate_gc_state_to_all_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
    Threads::threads_do(&propagator);
    _gc_state_changed = false;
  }
}

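// Flips gc state bits at a safepoint. The change only lands in the global state
// here; it reaches each thread's local copy later, via
// propagate_gc_state_to_all_threads() while the safepoint is still in effect.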
void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}

void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
  // Holding the thread lock here ensures that any thread created after we change the gc
  // state will have the correct state. It also prevents attaching threads from seeing
  // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
  // threads will use their thread local copy of the gc state (changed by a handshake, or on a
  // safepoint).
  assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
  _gc_state.set_cond(mask, value);
}

void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
  uint mask;
  assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
  if (!in_progress && is_concurrent_old_mark_in_progress()) {
    assert(mode()->is_generational(), "Only generational GC has old marking");
    assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
    // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
    mask = YOUNG_MARKING;
  } else {
    mask = MARKING | YOUNG_MARKING;
  }
  set_gc_state_at_safepoint(mask, in_progress);
  manage_satb_barrier(in_progress);
}

void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
#ifdef ASSERT
  // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
  bool has_forwarded = has_forwarded_objects();
  bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
  bool evacuating = _gc_state.is_set(EVACUATION);
  assert((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
         "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
#endif
  if (!in_progress && is_concurrent_young_mark_in_progress()) {
    // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
    assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
    set_gc_state_at_safepoint(OLD_MARKING, in_progress);
  } else {
    set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
  }
  manage_satb_barrier(in_progress);
}

bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
  return old_generation()->is_preparing_for_mark();
}

void ShenandoahHeap::manage_satb_barrier(bool active) {
  if (is_concurrent_mark_in_progress()) {
    // Ignore request to deactivate barrier while concurrent mark is in progress.
    // Do not attempt to re-activate the barrier if it is already active.
    if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
      ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
    }
  } else {
    // No concurrent marking is in progress so honor request to deactivate,
    // but only if the barrier is already active.
    if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
      ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
    }
  }
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state_at_safepoint(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state_at_safepoint(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

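// Atomically installs the cancellation cause. Returns true if this call performed
// the cancellation, which is allowed when no cancellation is pending, or when the
// pending cancellation was for a concurrent Shenandoah cycle and may be superseded
// by a more urgent cause.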
bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
  const GCCause::Cause prev = _cancelled_gc.xchg(cause);
  return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
}

void ShenandoahHeap::cancel_concurrent_mark() {
  if (mode()->is_generational()) {
    young_generation()->cancel_marking();
    old_generation()->cancel_marking();
  }

  global_generation()->cancel_marking();

  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc(cause)) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc, thread)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
    _cancel_requested_time = os::elapsedTime();
    return true;
  }
  return false;
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Stop reporting on gc thread cpu utilization
  mmu_tracker()->stop();

  // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC).
  control_thread()->stop();

  // Step 3. Shutdown uncommit thread.
  if (_uncommit_thread != nullptr) {
    _uncommit_thread->stop();
  }
}

void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;
  ClassUnloadingContext ctx(_workers->active_workers(),
                            true /* unregister_nmethods_during_purge */,
                            false /* lock_codeblob_free_separately */);

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahPhaseTimings::Phase phase = full_gc ?
                                          ShenandoahPhaseTimings::full_gc_purge_class_unload :
                                          ShenandoahPhaseTimings::degen_gc_purge_class_unload;
    ShenandoahIsAliveSelector is_alive;
    {
      CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
      ShenandoahGCPhase gc_phase(phase);
      ShenandoahGCWorkerPhase worker_phase(phase);
      bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      uint num_workers = _workers->active_workers();
      ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
      _workers->run_task(&unlink_task);
    }
    // Release unloaded nmethods' memory.
    ClassUnloadingContext::context()->purge_and_free_nmethods();
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::degen_gc_purge_cldg);
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  DEBUG_ONLY(MetaspaceUtils::verify();)
}

// Weak roots are either pre-evacuated (final mark) or updated (final update refs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, when that cannot be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
  uint num_workers = _workers->active_workers();
  ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
                                               ShenandoahPhaseTimings::full_gc_purge_weak_par :
                                               ShenandoahPhaseTimings::degen_gc_purge_weak_par;
  ShenandoahGCPhase phase(timing_phase);
  ShenandoahGCWorkerPhase worker_phase(timing_phase);
  // Cleanup weak roots
  if (has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahUpdateRefsClosure keep_alive;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
      cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
    _workers->run_task(&cleaning_task);
  } else {
    ShenandoahIsAliveClosure is_alive;
#ifdef ASSERT
    ShenandoahAssertNotForwardedClosure verify_cl;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
      cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
#else
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
      cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
#endif
    _workers->run_task(&cleaning_task);
  }
}

void ShenandoahHeap::parallel_cleaning(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
  ShenandoahGCPhase phase(full_gc ?
                          ShenandoahPhaseTimings::full_gc_purge :
                          ShenandoahPhaseTimings::degen_gc_purge);
  stw_weak_refs(full_gc);
  stw_process_weak_roots(full_gc);
  stw_unload_classes(full_gc);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state_at_safepoint(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  if (mode()->is_generational()) {
    young_generation()->reset_bytes_allocated_since_gc_start();
    old_generation()->reset_bytes_allocated_since_gc_start();
  }

  global_generation()->reset_bytes_allocated_since_gc_start();
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert(is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapRegion* r = heap_region_containing(o);
  assert(r != nullptr, "Sanity");
  assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
  r->record_unpin();
}

void ShenandoahHeap::sync_pinned_region_status() {
  ShenandoahHeapLocker locker(lock());

  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    shenandoah_assert_generations_reconciled();
    if (gc_generation()->contains(r)) {
      assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
             "Region " SIZE_FORMAT " pinning status is inconsistent", i);
    }
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  if (unload_classes()) {
    _unloader.finish();
  }
}

#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    // Use ParallelGCThreads inside safepoints
    assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
           ParallelGCThreads, nworkers);
  } else {
    // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
           ConcGCThreads, nworkers);
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert(_verifier != nullptr, "sanity");
  return _verifier;
}

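// Parallel worker task for the update-refs phase: walks all regions and rewrites
// any references to collection-set objects so they point at the to-space copies.
// The CONCURRENT template parameter selects concurrent (suspendible, cancellable)
// or safepoint (STW) operation.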
template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
      // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
      // next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set
    T cl;
    ShenandoahHeapRegion* r = _regions->next();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
        if (ShenandoahPacing) {
          _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
        }
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}

void ShenandoahHeap::update_heap_region_states(bool concurrent) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);

    final_update_refs_update_region_states();

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
    trash_cset_regions();
  }
}

void ShenandoahHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates cl;
  parallel_heap_region_iterate(&cl);
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
  ShenandoahGCPhase phase(concurrent ?
                          ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                          ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
  ShenandoahHeapLocker locker(lock());
  size_t young_cset_regions, old_cset_regions;
  size_t first_old_region, last_old_region, old_region_count;
  _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
  // If there are no old regions, first_old_region will be greater than last_old_region
  assert((first_old_region > last_old_region) ||
         ((last_old_region + 1 - first_old_region >= old_region_count) &&
          get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
         "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
         old_region_count, first_old_region, last_old_region);

  if (mode()->is_generational()) {
#ifdef ASSERT
    if (ShenandoahVerify) {
      verifier()->verify_before_rebuilding_free_set();
    }
#endif

    // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
    // available for transfer to old. Note that transfer of humongous regions does not impact available.
    ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
    size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
    gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);

    // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
    // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
    // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
    // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
    //
    // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
    // within partially consumed regions of memory.
  }
  // Rebuild free set based on adjusted generation sizes.
  _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);

  if (mode()->is_generational()) {
    ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
    ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
    old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
  }
}

void ShenandoahHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();
  print_heap_regions_on(st);
}

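// Multiple regions share one marking-bitmap slice. Returns true if any region in
// r's slice group (optionally excluding r itself) is committed, in which case the
// slice must stay committed as well.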
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->index() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert(g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->index()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }
  }
  return false;
}

bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need commits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, exit right away.
    return true;
  }

  // Commit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  char* start = (char*) _bitmap_region.start() + off;

  if (!os::commit_memory(start, len, false)) {
    return false;
  }

  if (AlwaysPreTouch) {
    os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
  }

  return true;
}

bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion* r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need uncommits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed, exit right away.
    return true;
  }

  // Uncommit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
    return false;
  }
  return true;
}

void ShenandoahHeap::forbid_uncommit() {
  if (_uncommit_thread != nullptr) {
    _uncommit_thread->forbid_uncommit();
  }
}

void ShenandoahHeap::allow_uncommit() {
  if (_uncommit_thread != nullptr) {
    _uncommit_thread->allow_uncommit();
  }
}

#ifdef ASSERT
bool ShenandoahHeap::is_uncommit_in_progress() {
  if (_uncommit_thread != nullptr) {
    return _uncommit_thread->is_uncommit_in_progress();
  }
  return false;
}
#endif

void ShenandoahHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ShenandoahHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    os::naked_short_sleep(1);
    if (cancelled_gc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}

bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}

void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return MemoryUsage(_initial_size, used(), committed(), max_capacity());
}

ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

bool ShenandoahHeap::is_gc_state(GCState state) const {
  // If the global gc state has been changed, but hasn't yet been propagated to all threads, then
  // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
  // _gc_state_changed will be toggled to false and we need to use the thread local state.
  return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
}

ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
  assert(_liveness_cache != nullptr, "sanity");
  assert(worker_id < _max_workers, "sanity");
  for (uint i = 0; i < num_regions(); i++) {
    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
  }
#endif
  return _liveness_cache[worker_id];
}

void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
  assert(worker_id < _max_workers, "sanity");
  assert(_liveness_cache != nullptr, "sanity");
  ShenandoahLiveData* ld = _liveness_cache[worker_id];
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahLiveData live = ld[i];
    if (live > 0) {
      ShenandoahHeapRegion* r = get_region(i);
      r->increase_live_data_gc_words(live);
      ld[i] = 0;
    }
  }
}

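// Decides whether a continuation stack chunk must keep using GC barriers in the
// heap's current phase: barriers are needed while marking (for objects allocated
// before mark start) and whenever forwarded objects may exist.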
bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) return false;

  // Objects allocated after marking start are implicitly alive, don't need any barriers during
  // marking phase.
  if (is_concurrent_mark_in_progress() &&
      !marking_context()->allocated_after_mark_start(obj)) {
    return true;
  }

  // Can not guarantee obj is deeply good.
  if (has_forwarded_objects()) {
    return true;
  }

  return false;
}

ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
  if (!mode()->is_generational()) {
    return global_generation();
  } else if (affiliation == YOUNG_GENERATION) {
    return young_generation();
  } else if (affiliation == OLD_GENERATION) {
    return old_generation();
  }

  ShouldNotReachHere();
  return nullptr;
}

void ShenandoahHeap::log_heap_status(const char* msg) const {
  if (mode()->is_generational()) {
    young_generation()->log_status(msg);
    old_generation()->log_status(msg);
  } else {
    global_generation()->log_status(msg);
  }
}