/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
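  // Note: heap, mark bitmap, and region storage all share the same page size policy here:
  // small pages by default, large pages when -XX:+UseLargePages is in effect.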

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);
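
  // The mark bitmap is committed and uncommitted in page-granular "slices". When a single
  // page covers the bitmap of several regions, one slice spans all of those regions; when
  // a region's bitmap chunk spans whole pages, each region gets its own slice. Either way,
  // bitmap commit/uncommit requests stay aligned with OS page boundaries.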

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

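  // Commit only the bitmap slices covering the initially committed regions, rounded up to
  // slice granularity; the rest of the bitmap is committed lazily as regions get committed.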
  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If that does not succeed, bite the bullet and allocate at whatever address the OS gives us.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }
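
  // The cset map keeps one byte per region, indexed by (addr >> region_size_shift). Placing
  // the map at a low address likely keeps the biased map base small enough for immediate
  // operands in the generated cset-membership checks, which is the benefit noted above.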

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before the initializing thread zeroes it in initialize() below. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into huge ones. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }
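
  // Rough semantics, mirroring the G1 knobs referenced above: completed SATB buffers are
  // handed off for processing once ~20 of them are pending, and a mutator buffer that is
  // still more than ~60% full after filtering is enqueued instead of compacted and reused.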

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = nullptr;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  st->print_cr("Collection set:");
  ShenandoahCollectionSet* cset = collection_set();
  if (cset != nullptr) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (null)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because they cannot determine
  // their max_size yet. Instead, let the WorkerThreads initialize a GCLAB whenever a new
  // worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}
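
// Note: the soft max is a tunable capacity target for the heuristics, distinct from the
// hard max above. It is presumably driven by the manageable SoftMaxHeapSize flag, so it
// can be adjusted at runtime without restarting the VM.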

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the application
  // can keep using the committed regions near the heap start. GC allocations are much less
  // frequent, and can therefore accept the commit costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
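      // Re-check under the heap lock: the region may have been allocated into between
      // the unlocked check above and taking the lock.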
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut below. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert (size <= actual_size, "allocation should fit");

  // ...and clear or zap just allocated TLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // If the allocation failed, block until the control thread has reacted, then retry
    // the allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress (or until at least
    // one full GC has completed).
    size_t original_count = shenandoah_policy()->full_gc_count();
    while (result == nullptr
        && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with mutator allocation, then we may need to block for safepoint.
  // We cannot block for safepoint for GC allocations, because there is a high chance
  // we are already running at safepoint or from stack watermark machinery, and we cannot
  // block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != nullptr) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  // Return the max allowed size, and let the allocation path
  // figure out the safe size for current allocation.
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
  }
  return nullptr;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  tcl->do_thread(_control_thread);
  workers()->threads_do(tcl);
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  const uint active_workers = workers()->active_workers();
  const size_t n_regions = num_regions();
  size_t stride = ShenandoahParallelRegionStride;
  if (stride == 0 && active_workers > 1) {
    // Automatically derive the stride to balance the work between threads
    // evenly. Do not try to split work if below the reasonable threshold.
    constexpr size_t threshold = 4096;
    stride = n_regions <= threshold ?
            threshold :
            (n_regions + active_workers - 1) / active_workers;
  }
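  // Note: when n_regions <= threshold, the stride is set to the threshold itself, so the
  // n_regions > stride check below fails and we fall through to serial iteration.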

  if (n_regions > stride && active_workers > 1) {
    ShenandoahParallelHeapRegionTask task(blk, stride);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have updated it already during
      // concurrent reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads() {
  ShenandoahRendezvousClosure cl;
  Handshake::execute(&cl);
}
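
// The closure body is intentionally empty: even a no-op handshake forces every Java thread
// through a handshake/safepoint poll, which guarantees all threads have observed the state
// published before the rendezvous (e.g. updated gc-state flags) before we proceed.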

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happened in the meantime.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set, make sure it knows about
      // current pinning status. Also, this allows trashing more regions that
      // now have their pinning status dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember limit for updating refs. It's guaranteed that we get no
      // from-space-refs written from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(lock());
    _collection_set->clear();
    heuristics()->choose_collection_set(_collection_set);
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
  // make them parsable for update code to work correctly. Plus, we can compute new sizes
  // for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

void ShenandoahHeap::propagate_gc_state_to_java_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    _gc_state_changed = false;
    char state = gc_state();
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
      ShenandoahThreadLocalData::set_gc_state(t, state);
    }
  }
}

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}
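
// set_gc_state() only updates the global state and raises _gc_state_changed; the new value
// reaches the per-thread copies (read by the barrier fast paths) when
// propagate_gc_state_to_java_threads() runs, still within the same safepoint.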

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}
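
// The CAS from CANCELLABLE to CANCELLED ensures only one requester "wins" the cancellation;
// later callers observe CANCELLED and return false, so the cancellation is logged only once.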

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until GC worker exits normally.
  control_thread()->stop();
}

void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;
  ClassUnloadingContext ctx(_workers->active_workers(),
                            true /* unregister_nmethods_during_purge */,
                            false /* lock_codeblob_free_separately */);

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahPhaseTimings::Phase phase = full_gc ?
                                          ShenandoahPhaseTimings::full_gc_purge_class_unload :
                                          ShenandoahPhaseTimings::degen_gc_purge_class_unload;
    ShenandoahIsAliveSelector is_alive;
    {
      CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
      ShenandoahGCPhase gc_phase(phase);
      ShenandoahGCWorkerPhase worker_phase(phase);
      bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      uint num_workers = _workers->active_workers();
      ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
      _workers->run_task(&unlink_task);
    }
    // Release the memory of unloaded nmethods.
    ClassUnloadingContext::context()->purge_and_free_nmethods();
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::degen_gc_purge_cldg);
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  DEBUG_ONLY(MetaspaceUtils::verify();)
}

// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to null out dead oops in the roots, if that cannot be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
  uint num_workers = _workers->active_workers();
  ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
                                               ShenandoahPhaseTimings::full_gc_purge_weak_par :
                                               ShenandoahPhaseTimings::degen_gc_purge_weak_par;
  ShenandoahGCPhase phase(timing_phase);
  ShenandoahGCWorkerPhase worker_phase(timing_phase);
  // Cleanup weak roots
  if (has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahUpdateRefsClosure keep_alive;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
      cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
    _workers->run_task(&cleaning_task);
  } else {
    ShenandoahIsAliveClosure is_alive;
#ifdef ASSERT
    ShenandoahAssertNotForwardedClosure verify_cl;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
      cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
#else
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
      cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
#endif
    _workers->run_task(&cleaning_task);
  }
}

void ShenandoahHeap::parallel_cleaning(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
  ShenandoahGCPhase phase(full_gc ?
                          ShenandoahPhaseTimings::full_gc_purge :
                          ShenandoahPhaseTimings::degen_gc_purge);
  stw_weak_refs(full_gc);
  stw_process_weak_roots(full_gc);
  stw_unload_classes(full_gc);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}
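
// The biased map address is effectively the cset byte map base offset by the heap base
// (shifted by the region size), so compiled code can test cset membership with a single
// load: biased_base[oop >> region_size_shift]. This is the address handed to the JIT for
// the fast-path check.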

size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapRegion* r = heap_region_containing(o);
  assert(r != nullptr, "Sanity");
  assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
  r->record_unpin();
}
1936 
1937 void ShenandoahHeap::sync_pinned_region_status() {
1938 ShenandoahHeapLocker locker(lock());
1939 
1940 for (size_t i = 0; i < num_regions(); i++) {
1941 ShenandoahHeapRegion* r = get_region(i);
1942 if (r->is_active()) {
1943 if (r->is_pinned()) {
1944 if (r->pin_count() == 0) {
1945 r->make_unpinned();
1946 }
1947 } else {
1948 if (r->pin_count() > 0) {
1949 r->make_pinned();
1950 }
1951 }
1952 }
1953 }
1954
1955 assert_pinned_region_status();
1956 }
1957
1958 #ifdef ASSERT
1959 void ShenandoahHeap::assert_pinned_region_status() {
1960 for (size_t i = 0; i < num_regions(); i++) {
1961 ShenandoahHeapRegion* r = get_region(i);
1962 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1963 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1964 }
1965 }
1966 #endif
1967
1968 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1969 return _gc_timer;
1970 }
1971
1972 void ShenandoahHeap::prepare_concurrent_roots() {
1973 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1974 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1975 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1976 set_concurrent_weak_root_in_progress(true);
1977 if (unload_classes()) {
1978 _unloader.prepare();
1979 }
1980 }
1981
1982 void ShenandoahHeap::finish_concurrent_roots() {
1983 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1984 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1985 if (unload_classes()) {
1986 _unloader.finish();
1987 }
1988 }
1989
1990 #ifdef ASSERT
1991 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1992 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1993
1994 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1995 if (UseDynamicNumberOfGCThreads) {
1996 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1997 } else {
1998 // Use ParallelGCThreads inside safepoints
1999 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2000 }
2001 } else {
2002 if (UseDynamicNumberOfGCThreads) {
2003 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2004 } else {
2005 // Use ConcGCThreads outside safepoints
2006 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2007 }
2008 }
2009 }
2010 #endif
2011
2012 ShenandoahVerifier* ShenandoahHeap::verifier() {
2013 guarantee(ShenandoahVerify, "Should be enabled");
2014 assert (_verifier != nullptr, "sanity");
2015 return _verifier;
2016 }
2017
2018 template<bool CONCURRENT>
2019 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2020 private:
2021 ShenandoahHeap* _heap;
2022 ShenandoahRegionIterator* _regions;
2023 public:
2024 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2025 WorkerTask("Shenandoah Update References"),
2026 _heap(ShenandoahHeap::heap()),
2027 _regions(regions) {
2028 }
2029
2030 void work(uint worker_id) {
2031 if (CONCURRENT) {
2032 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2033 ShenandoahSuspendibleThreadSetJoiner stsj;
2034 do_work<ShenandoahConcUpdateRefsClosure>();
2035 } else {
2036 ShenandoahParallelWorkerSession worker_session(worker_id);
2037 do_work<ShenandoahSTWUpdateRefsClosure>();
2038 }
2039 }
2040
2041 private:
2042 template<class T>
2043 void do_work() {
2044 T cl;
2045 ShenandoahHeapRegion* r = _regions->next();
2046 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2047 while (r != nullptr) {
2048 HeapWord* update_watermark = r->get_update_watermark();
2049 assert (update_watermark >= r->bottom(), "sanity");
2050 if (r->is_active() && !r->is_cset()) {
2051 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2052 }
2053 if (ShenandoahPacing) {
2054 _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2055 }
2056 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2057 return;
2058 }
2059 r = _regions->next();
2060 }
2061 }
2062 };
2063
2064 void ShenandoahHeap::update_heap_references(bool concurrent) {
2065 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2066
2067 if (concurrent) {
2068 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2069 workers()->run_task(&task);
2070 } else {
2071 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2072 workers()->run_task(&task);
2073 }
2074 }
2075
2076
2077 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2078 private:
2079 ShenandoahHeapLock* const _lock;
2080
2081 public:
2082 ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2083
2084 void heap_region_do(ShenandoahHeapRegion* r) {
2085 // Drop the unnecessary "pinned" state from regions that do not have CP marks
2086 // anymore, as this allows trashing them.
2087
2088 if (r->is_active()) {
2089 if (r->is_pinned()) {
2090 if (r->pin_count() == 0) {
2091 ShenandoahHeapLocker locker(_lock);
2092 r->make_unpinned();
2093 }
2094 } else {
2095 if (r->pin_count() > 0) {
2096 ShenandoahHeapLocker locker(_lock);
2097 r->make_pinned();
2098 }
2099 }
2100 }
2101 }
2102
2103 bool is_thread_safe() { return true; }
2104 };
2105
2106 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2107 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2108 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2109
2110 {
2111 ShenandoahGCPhase phase(concurrent ?
2112 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2113 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2114 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2115 parallel_heap_region_iterate(&cl);
2116
2117 assert_pinned_region_status();
2118 }
2119
2120 {
2121 ShenandoahGCPhase phase(concurrent ?
2122 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2123 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2124 trash_cset_regions();
2125 }
2126 }
2127
2128 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2129 {
2130 ShenandoahGCPhase phase(concurrent ?
2131 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2132 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2133 ShenandoahHeapLocker locker(lock());
2134 _free_set->rebuild();
2135 }
2136 }
2137
2138 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2139 print_on(st);
2140 st->cr();
2141 print_heap_regions_on(st);
2142 }
2143
2144 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2145 size_t slice = r->index() / _bitmap_regions_per_slice;
2146
2147 size_t regions_from = _bitmap_regions_per_slice * slice;
2148 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2149 for (size_t g = regions_from; g < regions_to; g++) {
2150 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2151 if (skip_self && g == r->index()) continue;
2152 if (get_region(g)->is_committed()) {
2153 return true;
2154 }
2155 }
2156 return false;
2157 }
2158 
2159 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2160 shenandoah_assert_heaplocked();
2161 
2162 // Bitmaps in special regions do not need commits
2163 if (_bitmap_region_special) {
2164 return true;
2165 }
2166 
2167 if (is_bitmap_slice_committed(r, true)) {
2168 // Some other region from the group is already committed, meaning the bitmap
2169 // slice is already committed, exit right away.
2170 return true;
2171 }
2172 
2173 // Commit the bitmap slice:
2174 size_t slice = r->index() / _bitmap_regions_per_slice;
2175 size_t off = _bitmap_bytes_per_slice * slice;
2176 size_t len = _bitmap_bytes_per_slice;
2177 char* start = (char*) _bitmap_region.start() + off;
2178 
2179 if (!os::commit_memory(start, len, false)) {
2180 return false;
2181 }
2182
2183 if (AlwaysPreTouch) {
2184 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2185 }
2186
2187 return true;
2188 }
2189
2190 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2191 shenandoah_assert_heaplocked();
2192
2193 // Bitmaps in special regions do not need uncommits
2194 if (_bitmap_region_special) {
2195 return true;
2196 }
2197
2198 if (is_bitmap_slice_committed(r, true)) {
2199 // Some other region from the group is still committed, meaning the bitmap
2200 // slice should stay committed, exit right away.
2201 return true;
2202 }
2203
2204 // Uncommit the bitmap slice:
2205 size_t slice = r->index() / _bitmap_regions_per_slice;
2206 size_t off = _bitmap_bytes_per_slice * slice;
2207 size_t len = _bitmap_bytes_per_slice;
2208 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2209 return false;
2210 }
2211 return true;
2212 }
2213
2214 void ShenandoahHeap::safepoint_synchronize_begin() {
2215 SuspendibleThreadSet::synchronize();
2216 }
2217
2218 void ShenandoahHeap::safepoint_synchronize_end() {
2219 SuspendibleThreadSet::desynchronize();
2220 }
2221
2222 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2223 static const char *msg = "Concurrent uncommit";
2224 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2225 EventMark em("%s", msg);
2226
2227 op_uncommit(shrink_before, shrink_until);
2228 }
2229
2230 void ShenandoahHeap::try_inject_alloc_failure() {
2231 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2232 _inject_alloc_failure.set();
2233 os::naked_short_sleep(1);
2234 if (cancelled_gc()) {
2235 log_info(gc)("Allocation failure was successfully injected");
2236 }
2237 }
2238 }
2239
2240 bool ShenandoahHeap::should_inject_alloc_failure() {
2241 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2242 }
2243
2244 void ShenandoahHeap::initialize_serviceability() {
2245 _memory_pool = new ShenandoahMemoryPool(this);
2246 _cycle_memory_manager.add_pool(_memory_pool);
2247 _stw_memory_manager.add_pool(_memory_pool);
2248 }
2249
2250 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2251 GrowableArray<GCMemoryManager*> memory_managers(2);
2252 memory_managers.append(&_cycle_memory_manager);
2253 memory_managers.append(&_stw_memory_manager);
2254 return memory_managers;
2255 }
2256
2257 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2258 GrowableArray<MemoryPool*> memory_pools(1);
2259 memory_pools.append(_memory_pool);
2260 return memory_pools;
2261 }
2262
2263 MemoryUsage ShenandoahHeap::memory_usage() {
2264 return _memory_pool->get_memory_usage();
2265 }
2266
2267 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2268 _heap(ShenandoahHeap::heap()),
2269 _index(0) {}
2270
2271 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2272 _heap(heap),
2273 _index(0) {}
2274
2275 void ShenandoahRegionIterator::reset() {
2276 _index = 0;
2277 }
2278
2279 bool ShenandoahRegionIterator::has_next() const {
2280 return _index < _heap->num_regions();
2281 }
2282
2283 char ShenandoahHeap::gc_state() const {
2284 return _gc_state.raw_value();
2285 }
2286
2287 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2288 #ifdef ASSERT
2289 assert(_liveness_cache != nullptr, "sanity");
2290 assert(worker_id < _max_workers, "sanity");
2291 for (uint i = 0; i < num_regions(); i++) {
2292 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2293 }
2294 #endif
2295 return _liveness_cache[worker_id];
2296 }
2297
2298 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2299 assert(worker_id < _max_workers, "sanity");
2300 assert(_liveness_cache != nullptr, "sanity");
2301 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2302 for (uint i = 0; i < num_regions(); i++) {
2303 ShenandoahLiveData live = ld[i];
2304 if (live > 0) {
2305 ShenandoahHeapRegion* r = get_region(i);
2306 r->increase_live_data_gc_words(live);
2307 ld[i] = 0;
2308 }
2309 }
2310 }
2311
2312 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2313 if (is_idle()) return false;
2314
2315 // Objects allocated after marking start are implicitly alive, don't need any barriers during
2316 // marking phase.
2317 if (is_concurrent_mark_in_progress() &&
2318 !marking_context()->allocated_after_mark_start(obj)) {
2319 return true;
2320 }
2321
2322 // Cannot guarantee obj is deeply good, i.e. that everything it references has been updated.
2323 if (has_forwarded_objects()) {
2324 return true;
2325 }
2326
2327 return false;
2328 }
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "memory/allocation.hpp"
29 #include "memory/universe.hpp"
30
31 #include "gc/shared/classUnloadingContext.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/locationPrinter.inline.hpp"
36 #include "gc/shared/memAllocator.hpp"
37 #include "gc/shared/plab.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39
40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
49 #include "gc/shenandoah/shenandoahControlThread.hpp"
50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
56 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
57 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
58 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
59 #include "gc/shenandoah/shenandoahInitLogger.hpp"
60 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
61 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
64 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
65 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
66 #include "gc/shenandoah/shenandoahPadding.hpp"
67 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
68 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
69 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
70 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
71 #include "gc/shenandoah/shenandoahSTWMark.hpp"
72 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
73 #include "gc/shenandoah/shenandoahUtils.hpp"
74 #include "gc/shenandoah/shenandoahVerifier.hpp"
75 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
76 #include "gc/shenandoah/shenandoahVMOperations.hpp"
77 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
78 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
79 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
80 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
81 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
82 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
83 #include "utilities/globalDefinitions.hpp"
84
85 #if INCLUDE_JFR
86 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
87 #endif
88
89 #include "classfile/systemDictionary.hpp"
90 #include "code/codeCache.hpp"
91 #include "memory/classLoaderMetaspace.hpp"
92 #include "memory/metaspaceUtils.hpp"
93 #include "oops/compressedOops.inline.hpp"
94 #include "prims/jvmtiTagMap.hpp"
95 #include "runtime/atomic.hpp"
96 #include "runtime/globals.hpp"
97 #include "runtime/interfaceSupport.inline.hpp"
98 #include "runtime/java.hpp"
99 #include "runtime/orderAccess.hpp"
100 #include "runtime/safepointMechanism.hpp"
101 #include "runtime/threads.hpp"
102 #include "runtime/vmThread.hpp"
103 #include "services/mallocTracker.hpp"
104 #include "services/memTracker.hpp"
105 #include "utilities/events.hpp"
106 #include "utilities/powerOfTwo.hpp"
107
108 class ShenandoahPretouchHeapTask : public WorkerTask {
109 private:
110 ShenandoahRegionIterator _regions;
111 const size_t _page_size;
112 public:
113 ShenandoahPretouchHeapTask(size_t page_size) :
114 WorkerTask("Shenandoah Pretouch Heap"),
115 _page_size(page_size) {}
116
117 virtual void work(uint worker_id) {
118 ShenandoahHeapRegion* r = _regions.next();
119 while (r != nullptr) {
120 if (r->is_committed()) {
121 os::pretouch_memory(r->bottom(), r->end(), _page_size);
122 }
123 r = _regions.next();
124 }
125 }
126 };
127 
128 class ShenandoahPretouchBitmapTask : public WorkerTask {
129 private:
130 ShenandoahRegionIterator _regions;
131 char* _bitmap_base;
132 const size_t _bitmap_size;
133 const size_t _page_size;
134 public:
135 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
136 WorkerTask("Shenandoah Pretouch Bitmap"),
137 _bitmap_base(bitmap_base),
138 _bitmap_size(bitmap_size),
139 _page_size(page_size) {}
140 
141 virtual void work(uint worker_id) {
142 ShenandoahHeapRegion* r = _regions.next();
143 while (r != nullptr) {
144 size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / ShenandoahMarkBitMap::heap_map_factor();
145 size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / ShenandoahMarkBitMap::heap_map_factor();
146 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
147 
148 if (r->is_committed()) {
149 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
150 }
151 
152 r = _regions.next();
153 }
154 }
155 };
156 
157 jint ShenandoahHeap::initialize() {
158 //
159 // Figure out heap sizing
160 //
161
162 size_t init_byte_size = InitialHeapSize;
163 size_t min_byte_size = MinHeapSize;
164 size_t max_byte_size = MaxHeapSize;
165 size_t heap_alignment = HeapAlignment;
166
167 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
168
169 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
170 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
171
172 _num_regions = ShenandoahHeapRegion::region_count();
173 assert(_num_regions == (max_byte_size / reg_size_bytes),
174 "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
175 _num_regions, max_byte_size, reg_size_bytes);
176
177 size_t num_committed_regions = init_byte_size / reg_size_bytes;
178 num_committed_regions = MIN2(num_committed_regions, _num_regions);
179 assert(num_committed_regions <= _num_regions, "sanity");
180 _initial_size = num_committed_regions * reg_size_bytes;
181
182 size_t num_min_regions = min_byte_size / reg_size_bytes;
183 num_min_regions = MIN2(num_min_regions, _num_regions);
184 assert(num_min_regions <= _num_regions, "sanity");
185 _minimum_size = num_min_regions * reg_size_bytes;
186
187 // Default to max heap size.
188 _soft_max_size = _num_regions * reg_size_bytes;
189
190 _committed = _initial_size;
191
192 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
193 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
194 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
195
196 //
197 // Reserve and commit memory for heap
198 //
199
200 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
201 initialize_reserved_region(heap_rs);
202 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
203 _heap_region_special = heap_rs.special();
204
205 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
206 "Misaligned heap: " PTR_FORMAT, p2i(base()));
207 os::trace_page_sizes_for_requested_size("Heap",
208 max_byte_size, heap_rs.page_size(), heap_alignment,
209 heap_rs.base(), heap_rs.size());
210
211 #if SHENANDOAH_OPTIMIZED_MARKTASK
212 // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
213 // Fail if we ever attempt to address more than we can.
214 if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
215 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
216 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
217 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
218 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
219 vm_exit_during_initialization("Fatal Error", buf);
220 }
221 #endif
222
223 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
224 if (!_heap_region_special) {
225 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
226 "Cannot commit heap memory");
227 }
228
229 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
230
231 // Now we know the number of regions and heap sizes, initialize the heuristics.
232 initialize_heuristics();
233
234 assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
235
236 //
237 // Worker threads must be initialized after the barrier is configured
238 //
239 _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
240 if (_workers == nullptr) {
241 vm_exit_during_initialization("Failed necessary allocation.");
242 } else {
243 _workers->initialize_workers();
244 }
245
246 if (ParallelGCThreads > 1) {
247 _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
248 _safepoint_workers->initialize_workers();
249 }
250
251 //
252 // Reserve and commit memory for bitmap(s)
253 //
254
255 size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
256 _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
257
258 size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
259
260 guarantee(bitmap_bytes_per_region != 0,
261 "Bitmap bytes per region should not be zero");
262 guarantee(is_power_of_2(bitmap_bytes_per_region),
263 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
264
265 if (bitmap_page_size > bitmap_bytes_per_region) {
266 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
267 _bitmap_bytes_per_slice = bitmap_page_size;
268 } else {
269 _bitmap_regions_per_slice = 1;
270 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
271 }
272
273 guarantee(_bitmap_regions_per_slice >= 1,
274 "Should have at least one region per slice: " SIZE_FORMAT,
275 _bitmap_regions_per_slice);
276
277 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
278 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
279 _bitmap_bytes_per_slice, bitmap_page_size);
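// For illustration only (assumed, not guaranteed, values): with 4 MB regions and a
// ShenandoahMarkBitMap::heap_map_factor() of 64 (one mark bit per 8-byte heap word),
// bitmap_bytes_per_region = 4 MB / 64 = 64 KB. With 2 MB large pages, bitmap_page_size
// (2 MB) exceeds 64 KB, so the code above yields:
//   _bitmap_regions_per_slice = 2 MB / 64 KB = 32
//   _bitmap_bytes_per_slice   = 2 MB
// i.e. one bitmap page covers 32 regions, and that page commits and uncommits as a unit.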
280
281 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
282 os::trace_page_sizes_for_requested_size("Mark Bitmap",
283 bitmap_size_orig, bitmap.page_size(), bitmap_page_size,
284 bitmap.base(),
285 bitmap.size());
286 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
287 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
288 _bitmap_region_special = bitmap.special();
289
290 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
291 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
292 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
293 if (!_bitmap_region_special) {
294 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
295 "Cannot commit bitmap memory");
296 }
297
298 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
299
300 if (ShenandoahVerify) {
301 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
302 os::trace_page_sizes_for_requested_size("Verify Bitmap",
303 bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size,
304 verify_bitmap.base(),
305 verify_bitmap.size());
306 if (!verify_bitmap.special()) {
307 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
308 "Cannot commit verification bitmap memory");
309 }
310 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
311 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
312 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
313 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
314 }
315
316 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
317 size_t aux_bitmap_page_size = bitmap_page_size;
318
319 ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
320 os::trace_page_sizes_for_requested_size("Aux Bitmap",
321 bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size,
322 aux_bitmap.base(), aux_bitmap.size());
323 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
324 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
325 _aux_bitmap_region_special = aux_bitmap.special();
326 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
327
328 //
329 // Create regions and region sets
330 //
331 size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
332 size_t region_storage_size_orig = region_align * _num_regions;
333 size_t region_storage_size = align_up(region_storage_size_orig,
334 MAX2(region_page_size, os::vm_allocation_granularity()));
335
336 ReservedSpace region_storage(region_storage_size, region_page_size);
337 os::trace_page_sizes_for_requested_size("Region Storage",
338 region_storage_size_orig, region_storage.page_size(), region_page_size,
339 region_storage.base(), region_storage.size());
340 MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
341 if (!region_storage.special()) {
342 os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
343 "Cannot commit region memory");
344 }
345
346 // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
347 // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
348 // If not successful, bite the bullet and allocate at whatever address.
349 {
350 const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
351 const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
352 const size_t cset_page_size = os::vm_page_size();
353
354 uintptr_t min = round_up_power_of_2(cset_align);
355 uintptr_t max = (1u << 30u);
356 ReservedSpace cset_rs;
357
358 for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
359 char* req_addr = (char*)addr;
360 assert(is_aligned(req_addr, cset_align), "Should be aligned");
361 cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
362 if (cset_rs.is_reserved()) {
363 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
364 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
365 break;
366 }
367 }
368
369 if (_collection_set == nullptr) {
370 cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
371 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
372 }
373 os::trace_page_sizes_for_requested_size("Collection Set",
374 cset_size, cset_rs.page_size(), cset_page_size,
375 cset_rs.base(),
376 cset_rs.size());
377 }
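// To make the probing loop above concrete (page and granularity sizes are illustrative
// assumptions): with a 64 KB cset_align, reservation is attempted at 64 KB, 128 KB,
// 256 KB, ... doubling up to the 1 GB cap, and the first successful reservation wins.
// Keeping the biased map at a low address is what enables the cheaper code generation
// for cset checks noted above; the fallback path accepts any address the OS returns.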
378
379 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
380 _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
381 _free_set = new ShenandoahFreeSet(this, _num_regions);
382
383 {
384 ShenandoahHeapLocker locker(lock());
385
386 for (size_t i = 0; i < _num_regions; i++) {
387 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
388 bool is_committed = i < num_committed_regions;
389 void* loc = region_storage.base() + i * region_align;
390
391 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
392 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
393
394 _marking_context->initialize_top_at_mark_start(r);
395 _regions[i] = r;
396 assert(!collection_set()->is_in(i), "New region should not be in collection set");
397
398 _affiliations[i] = ShenandoahAffiliation::FREE;
399 }
400
401 // Initialize to complete
402 _marking_context->mark_complete();
403 size_t young_cset_regions, old_cset_regions;
404
405 // We are initializing free set. We ignore cset region tallies.
406 size_t first_old, last_old, num_old;
407 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
408 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
409 }
410
411 if (AlwaysPreTouch) {
412 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
413 // before initialize() below zeroes it with the initializing thread. For any given region,
414 // we touch the region and the corresponding bitmaps from the same thread.
415 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
416
417 _pretouch_heap_page_size = heap_page_size;
418 _pretouch_bitmap_page_size = bitmap_page_size;
419
420 // OS memory managers may want to coalesce back-to-back pages. Make their jobs
421 // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
422
423 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
424 _workers->run_task(&bcl);
425
426 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
427 _workers->run_task(&hcl);
428 }
429
430 //
431 // Initialize the rest of GC subsystems
432 //
433
434 _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
435 for (uint worker = 0; worker < _max_workers; worker++) {
436 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
437 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
438 }
439
440 // There should probably be Shenandoah-specific options for these,
441 // just as there are G1-specific options.
442 {
443 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
444 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
445 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
446 }
447
448 _monitoring_support = new ShenandoahMonitoringSupport(this);
449 _phase_timings = new ShenandoahPhaseTimings(max_workers());
450 ShenandoahCodeRoots::initialize();
451
452 if (ShenandoahPacing) {
453 _pacer = new ShenandoahPacer(this);
454 _pacer->setup_for_idle();
455 }
456
457 initialize_controller();
458
459 if (ShenandoahUncommit) {
460 _uncommit_thread = new ShenandoahUncommitThread(this);
461 }
462
463 print_init_logger();
464
465 return JNI_OK;
466 }
467
468 void ShenandoahHeap::initialize_controller() {
469 _control_thread = new ShenandoahControlThread();
470 }
471
472 void ShenandoahHeap::print_init_logger() const {
473 ShenandoahInitLogger::print();
474 }
475
476 void ShenandoahHeap::initialize_mode() {
477 if (ShenandoahGCMode != nullptr) {
478 if (strcmp(ShenandoahGCMode, "satb") == 0) {
479 _gc_mode = new ShenandoahSATBMode();
480 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
481 _gc_mode = new ShenandoahPassiveMode();
482 } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
483 _gc_mode = new ShenandoahGenerationalMode();
484 } else {
485 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
486 }
487 } else {
488 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
489 }
490 _gc_mode->initialize_flags();
491 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
492 vm_exit_during_initialization(
493 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
494 _gc_mode->name()));
495 }
496 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
497 vm_exit_during_initialization(
498 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
499 _gc_mode->name()));
500 }
501 }
502
503 void ShenandoahHeap::initialize_heuristics() {
504 _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
505 _global_generation->initialize_heuristics(mode());
506 }
507
508 #ifdef _MSC_VER
509 #pragma warning( push )
510 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
511 #endif
512
513 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
514 CollectedHeap(),
515 _gc_generation(nullptr),
516 _active_generation(nullptr),
517 _initial_size(0),
518 _committed(0),
519 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
520 _workers(nullptr),
521 _safepoint_workers(nullptr),
522 _heap_region_special(false),
523 _num_regions(0),
524 _regions(nullptr),
525 _affiliations(nullptr),
526 _gc_state_changed(false),
527 _gc_no_progress_count(0),
528 _cancel_requested_time(0),
529 _update_refs_iterator(this),
530 _global_generation(nullptr),
531 _control_thread(nullptr),
532 _uncommit_thread(nullptr),
533 _young_generation(nullptr),
534 _old_generation(nullptr),
535 _shenandoah_policy(policy),
536 _gc_mode(nullptr),
537 _free_set(nullptr),
538 _pacer(nullptr),
539 _verifier(nullptr),
540 _phase_timings(nullptr),
541 _monitoring_support(nullptr),
542 _memory_pool(nullptr),
543 _stw_memory_manager("Shenandoah Pauses"),
544 _cycle_memory_manager("Shenandoah Cycles"),
545 _gc_timer(new ConcurrentGCTimer()),
546 _soft_ref_policy(),
547 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
548 _marking_context(nullptr),
549 _bitmap_size(0),
550 _bitmap_regions_per_slice(0),
551 _bitmap_bytes_per_slice(0),
552 _bitmap_region_special(false),
553 _aux_bitmap_region_special(false),
554 _liveness_cache(nullptr),
555 _collection_set(nullptr)
556 {
557 // Initialize GC mode early, many subsequent initialization procedures depend on it
558 initialize_mode();
559 _cancelled_gc.set(GCCause::_no_gc);
560 }
561
562 #ifdef _MSC_VER
563 #pragma warning( pop )
564 #endif
565
566 void ShenandoahHeap::print_on(outputStream* st) const {
567 st->print_cr("Shenandoah Heap");
568 st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
569 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
570 byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
571 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
572 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
573 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
574 num_regions(),
575 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
576 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
577
578 st->print("Status: ");
579 if (has_forwarded_objects()) st->print("has forwarded objects, ");
580 if (!mode()->is_generational()) {
581 if (is_concurrent_mark_in_progress()) st->print("marking,");
582 } else {
583 if (is_concurrent_old_mark_in_progress()) st->print("old marking, ");
584 if (is_concurrent_young_mark_in_progress()) st->print("young marking, ");
585 }
586 if (is_evacuation_in_progress()) st->print("evacuating, ");
587 if (is_update_refs_in_progress()) st->print("updating refs, ");
588 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
589 if (is_full_gc_in_progress()) st->print("full gc, ");
590 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
591 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
592 if (is_concurrent_strong_root_in_progress() &&
593 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");
594
595 if (cancelled_gc()) {
596 st->print("cancelled");
597 } else {
598 st->print("not cancelled");
599 }
600 st->cr();
601
602 st->print_cr("Reserved region:");
603 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
604 p2i(reserved_region().start()),
605 p2i(reserved_region().end()));
606 
607 ShenandoahCollectionSet* cset = collection_set();
608 st->print_cr("Collection set:");
609 if (cset != nullptr) {
610 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
611 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
612 } else {
613 st->print_cr(" (null)");
614 }
615
616 st->cr();
617 MetaspaceUtils::print_on(st);
618
619 if (Verbose) {
620 st->cr();
621 print_heap_regions_on(st);
622 }
623 }
624
625 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
626 public:
627 void do_thread(Thread* thread) {
628 assert(thread != nullptr, "Sanity");
629 ShenandoahThreadLocalData::initialize_gclab(thread);
630 }
631 };
632
633 void ShenandoahHeap::post_initialize() {
634 CollectedHeap::post_initialize();
635
636 // Schedule periodic task to report on gc thread CPU utilization
637 _mmu_tracker.initialize();
638
639 MutexLocker ml(Threads_lock);
640
641 ShenandoahInitWorkerGCLABClosure init_gclabs;
642 _workers->threads_do(&init_gclabs);
643
644 // gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
645 // Now, we will let WorkerThreads initialize gclab when a new worker is created.
646 _workers->set_initialize_gclab();
647
648 // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
649 // during a concurrent evacuation phase.
650 if (_safepoint_workers != nullptr) {
651 _safepoint_workers->threads_do(&init_gclabs);
652 _safepoint_workers->set_initialize_gclab();
653 }
654
655 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
656 }
657
658 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
659 return _global_generation->heuristics();
660 }
661
662 size_t ShenandoahHeap::used() const {
663 return global_generation()->used();
664 }
665
666 size_t ShenandoahHeap::committed() const {
667 return Atomic::load(&_committed);
668 }
669
670 void ShenandoahHeap::increase_committed(size_t bytes) {
671 shenandoah_assert_heaplocked_or_safepoint();
672 _committed += bytes;
673 }
674
675 void ShenandoahHeap::decrease_committed(size_t bytes) {
676 shenandoah_assert_heaplocked_or_safepoint();
677 _committed -= bytes;
678 }
679
680 // For tracking usage based on allocations, it should be the case that:
681 // * The sum of regions::used == heap::used
682 // * The sum of a generation's regions::used == generation::used
683 // * The sum of a generation's humongous regions::free == generation::humongous_waste
684 // These invariants are checked by the verifier on GC safepoints.
685 //
686 // Additional notes:
687 // * When a mutator's allocation request causes a region to be retired, the
688 // free memory left in that region is considered waste. It does not contribute
689 // to the usage, but it _does_ contribute to allocation rate.
690 // * The bottom of a PLAB must be aligned on card size. In some cases this will
691 // require padding in front of the PLAB (a filler object). Because this padding
692 // is included in the region's used memory we include the padding in the usage
693 // accounting as waste.
694 // * Mutator allocations are used to compute an allocation rate. They are also
695 // sent to the Pacer for those purposes.
696 // * There are three sources of waste:
697 // 1. The padding used to align a PLAB on card size
698 // 2. Region's free is less than minimum TLAB size and is retired
699 // 3. The unused portion of memory in the last region of a humongous object
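// A worked example of this accounting (sizes are illustrative): a mutator allocation
// of 512 KB (actual) that retires its region with 100 KB left over reports 100 KB of
// waste. The generation's allocation counter then grows by 612 KB (feeding the
// allocation rate and the pacer), while generation usage grows by only 512 KB;
// the 100 KB tail counts as waste, not usage.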
700 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
701 size_t actual_bytes = req.actual_size() * HeapWordSize;
702 size_t wasted_bytes = req.waste() * HeapWordSize;
703 ShenandoahGeneration* generation = generation_for(req.affiliation());
704
705 if (req.is_gc_alloc()) {
706 assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
707 increase_used(generation, actual_bytes + wasted_bytes);
708 } else {
709 assert(req.is_mutator_alloc(), "Expected mutator alloc here");
710 // padding and actual size both count towards allocation counter
711 generation->increase_allocated(actual_bytes + wasted_bytes);
712
713 // only actual size counts toward usage for mutator allocations
714 increase_used(generation, actual_bytes);
715
716 // notify pacer of both actual size and waste
717 notify_mutator_alloc_words(req.actual_size(), req.waste());
718
719 if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
720 increase_humongous_waste(generation, wasted_bytes);
721 }
722 }
723 }
724
725 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
726 generation->increase_humongous_waste(bytes);
727 if (!generation->is_global()) {
728 global_generation()->increase_humongous_waste(bytes);
729 }
730 }
731
732 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
733 generation->decrease_humongous_waste(bytes);
734 if (!generation->is_global()) {
735 global_generation()->decrease_humongous_waste(bytes);
736 }
737 }
738
739 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
740 generation->increase_used(bytes);
741 if (!generation->is_global()) {
742 global_generation()->increase_used(bytes);
743 }
744 }
745
746 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
747 generation->decrease_used(bytes);
748 if (!generation->is_global()) {
749 global_generation()->decrease_used(bytes);
750 }
751 }
752
753 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
754 if (ShenandoahPacing) {
755 control_thread()->pacing_notify_alloc(words);
756 if (waste > 0) {
757 pacer()->claim_for_alloc<true>(waste);
758 }
759 }
760 }
761
762 size_t ShenandoahHeap::capacity() const {
763 return committed();
764 }
765
766 size_t ShenandoahHeap::max_capacity() const {
767 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
768 }
769
770 size_t ShenandoahHeap::soft_max_capacity() const {
771 size_t v = Atomic::load(&_soft_max_size);
772 assert(min_capacity() <= v && v <= max_capacity(),
773 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
774 min_capacity(), v, max_capacity());
775 return v;
776 }
777
778 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
779 assert(min_capacity() <= v && v <= max_capacity(),
780 "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
781 min_capacity(), v, max_capacity());
782 Atomic::store(&_soft_max_size, v);
783 }
784
785 size_t ShenandoahHeap::min_capacity() const {
786 return _minimum_size;
787 }
788
789 size_t ShenandoahHeap::initial_capacity() const {
790 return _initial_size;
791 }
792
793 bool ShenandoahHeap::is_in(const void* p) const {
794 if (!is_in_reserved(p)) {
795 return false;
796 }
797
798 if (is_full_gc_move_in_progress()) {
799 // Full GC move is running, we do not have consistent region
800 // information yet. But we know the pointer is in the heap.
801 return true;
802 }
803
804 // Now check if we point to a live section in active region.
805 const ShenandoahHeapRegion* r = heap_region_containing(p);
806 if (p >= r->top()) {
807 return false;
808 }
809
810 if (r->is_active()) {
811 return true;
812 }
813
814 // The region is trash, but won't be recycled until after concurrent weak
815 // roots. We also don't allow mutators to allocate from trash regions
816 // during weak roots. Concurrent class unloading may access unmarked oops
817 // in trash regions.
818 return r->is_trash() && is_concurrent_weak_root_in_progress();
819 }
820
821 void ShenandoahHeap::notify_soft_max_changed() {
822 if (_uncommit_thread != nullptr) {
823 _uncommit_thread->notify_soft_max_changed();
824 }
825 }
826
827 void ShenandoahHeap::notify_explicit_gc_requested() {
828 if (_uncommit_thread != nullptr) {
829 _uncommit_thread->notify_explicit_gc_requested();
830 }
831 }
832
833 bool ShenandoahHeap::check_soft_max_changed() {
834 size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
835 size_t old_soft_max = soft_max_capacity();
836 if (new_soft_max != old_soft_max) {
837 new_soft_max = MAX2(min_capacity(), new_soft_max);
838 new_soft_max = MIN2(max_capacity(), new_soft_max);
839 if (new_soft_max != old_soft_max) {
840 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
841 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
842 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
843 );
844 set_soft_max_capacity(new_soft_max);
845 return true;
846 }
847 }
848 return false;
849 }
850
851 void ShenandoahHeap::notify_heap_changed() {
852 // Update monitoring counters when we take a new region. This amortizes the
853 // update costs on the slow path.
854 monitoring_support()->notify_heap_changed();
855 _heap_changed.try_set();
856 }
857
858 void ShenandoahHeap::set_forced_counters_update(bool value) {
859 monitoring_support()->set_forced_counters_update(value);
860 }
861
862 void ShenandoahHeap::handle_force_counters_update() {
863 monitoring_support()->handle_force_counters_update();
864 }
865
866 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
867 // New object should fit the GCLAB size
868 size_t min_size = MAX2(size, PLAB::min_size());
869
870 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
871 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
872
873 new_size = MIN2(new_size, PLAB::max_size());
874 new_size = MAX2(new_size, PLAB::min_size());
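// Worked example (sizes are illustrative): if the previous GCLAB heuristic size was
// 64 KB and a 4 KB object misses the fast path, new_size doubles to 128 KB and is then
// clamped into [PLAB::min_size(), PLAB::max_size()]. Repeated slow-path hits therefore
// grow the GCLAB geometrically, keeping the shared-allocation fallback below rare.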
875
876 // Record new heuristic value even if we take any shortcut. This captures
877 // the case when moderately-sized objects always take a shortcut. At some point,
878 // heuristics should catch up with them.
879 log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
880 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
881
882 if (new_size < size) {
883 // New size still does not fit the object. Fall back to shared allocation.
884 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
885 log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
886 return nullptr;
887 }
888
889 // Retire current GCLAB, and allocate a new one.
890 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
891 gclab->retire();
892
893 size_t actual_size = 0;
894 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
895 if (gclab_buf == nullptr) {
896 return nullptr;
897 }
898
899 assert (size <= actual_size, "allocation should fit");
900
901 // ...and clear or zap just allocated TLAB, if needed.
902 if (ZeroTLAB) {
903 Copy::zero_to_words(gclab_buf, actual_size);
904 } else if (ZapTLAB) {
905 // Skip mangling the space corresponding to the object header to
906 // ensure that the returned space is not considered parsable by
907 // any concurrent GC thread.
908 size_t hdr_size = oopDesc::header_size();
909 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
910 }
911 gclab->set_buf(gclab_buf, actual_size);
912 return gclab->allocate(size);
913 }
914
915 // Called from stubs in JIT code or interpreter
916 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
917 size_t requested_size,
918 size_t* actual_size) {
919 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
920 HeapWord* res = allocate_memory(req);
921 if (res != nullptr) {
922 *actual_size = req.actual_size();
923 } else {
924 *actual_size = 0;
925 }
926 return res;
927 }
928
929 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
930 size_t word_size,
931 size_t* actual_size) {
932 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
933 HeapWord* res = allocate_memory(req);
934 if (res != nullptr) {
935 *actual_size = req.actual_size();
936 } else {
937 *actual_size = 0;
938 }
939 return res;
940 }
941
942 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
943 intptr_t pacer_epoch = 0;
944 bool in_new_region = false;
945 HeapWord* result = nullptr;
946
947 if (req.is_mutator_alloc()) {
948 if (ShenandoahPacing) {
949 pacer()->pace_for_alloc(req.size());
950 pacer_epoch = pacer()->epoch();
951 }
952
953 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
954 result = allocate_memory_under_lock(req, in_new_region);
955 }
956
957 // Check that gc overhead is not exceeded.
958 //
959 // Shenandoah will grind along for quite a while allocating one
960 // object at a time using shared (non-tlab) allocations. This check
961 // is testing that the GC overhead limit has not been exceeded.
962 // This will notify the collector to start a cycle, but will raise
963 // an OOME to the mutator if the last Full GCs have not made progress.
964 // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
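// For example, assuming ShenandoahNoProgressThreshold is at its usual default of 5:
// once more than five consecutive degen/full GCs fail to make good progress, a failing
// non-LAB allocation takes this branch and raises OOM instead of stalling further.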
965 if ((result == nullptr) && !req.is_lab_alloc() && (get_gc_no_progress_count() > ShenandoahNoProgressThreshold)) {
966 control_thread()->handle_alloc_failure(req, false);
967 req.set_actual_size(0);
968 return nullptr;
969 }
970
971 if (result == nullptr) {
972 // Block until control thread reacted, then retry allocation.
973 //
974 // It might happen that one of the threads requesting allocation would unblock
975 // way later after GC happened, only to fail the second allocation, because
976 // other threads have already depleted the free storage. In this case, a better
977 // strategy is to try again, until at least one full GC has completed.
978 //
979 // Stop retrying and return nullptr to raise an OutOfMemoryError if our allocation failed even after:
980 // a) We experienced a GC that had good progress, or
981 // b) We experienced at least one Full GC (whether or not it had good progress)
982
983 const size_t original_count = shenandoah_policy()->full_gc_count();
984 while (result == nullptr && should_retry_allocation(original_count)) {
985 control_thread()->handle_alloc_failure(req, true);
986 result = allocate_memory_under_lock(req, in_new_region);
987 }
988 if (result != nullptr) {
989 // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
990 notify_gc_progress();
991 }
992 if (log_develop_is_enabled(Debug, gc, alloc)) {
993 ResourceMark rm;
994 log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
995 ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
996 Thread::current()->name(), p2i(result), req.type_string(), req.size(),
997 original_count, get_gc_no_progress_count());
998 }
999 }
1000 } else {
1001 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1002 result = allocate_memory_under_lock(req, in_new_region);
1003 // Do not call handle_alloc_failure() here, because we cannot block.
1004 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1005 }
1006
1007 if (in_new_region) {
1008 notify_heap_changed();
1009 }
1010
1011 if (result == nullptr) {
1012 req.set_actual_size(0);
1013 }
1014
1015 // This is called regardless of the outcome of the allocation to account
1016 // for any waste created by retiring regions with this request.
1017 increase_used(req);
1018
1019 if (result != nullptr) {
1020 size_t requested = req.size();
1021 size_t actual = req.actual_size();
1022
1023 assert (req.is_lab_alloc() || (requested == actual),
1024 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1025 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1026
1027 if (req.is_mutator_alloc()) {
1028 // If we requested more than we were granted, give the rest back to pacer.
1029 // This only matters if we are in the same pacing epoch: do not try to unpace
1030 // over the budget for the other phase.
1031 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1032 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1033 }
1034 }
1035 }
1036
1037 return result;
1038 }
1039
1040 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
1041 return shenandoah_policy()->full_gc_count() == original_full_gc_count
1042 && !shenandoah_policy()->is_at_shutdown();
1043 }
1044
1045 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1046 // If we are dealing with mutator allocation, then we may need to block for safepoint.
1047 // We cannot block for safepoint for GC allocations, because there is a high chance
1048 // we are already running at safepoint or from stack watermark machinery, and we cannot
1049 // block again.
1050 ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1051
1052 // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1053 if (req.is_old() && !old_generation()->can_allocate(req)) {
1054 return nullptr;
1055 }
1056
1057 // If TLAB request size is greater than available, allocate() will attempt to downsize request to fit within available
1058 // memory.
1059 HeapWord* result = _free_set->allocate(req, in_new_region);
1060
1061 // Record the plab configuration for this result and register the object.
1062 if (result != nullptr && req.is_old()) {
1063 old_generation()->configure_plab_for_current_thread(req);
1064 if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1065 // Register the newly allocated object while we're holding the global lock since there's no synchronization
1066 // built in to the implementation of register_object(). There are potential races when multiple independent
1067 // threads are allocating objects, some of which might span the same card region. For example, consider
1068 // a card table's memory region within which three objects are being allocated by three different threads:
1069 //
1070 // objects being "concurrently" allocated:
1071 // [-----a------][-----b-----][--------------c------------------]
1072 // [---- card table memory range --------------]
1073 //
1074 // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
1075 // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1076 // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1077 // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1078 // card region.
1079 //
1080 // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1081 // last-start representing object b while first-start represents object c. This is why we need to require all
1082 // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1083 old_generation()->card_scan()->register_object(result);
1084 }
1085 }
1086
1087 return result;
1088 }
1089
1090 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1091 bool* gc_overhead_limit_was_exceeded) {
1092 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1093 return allocate_memory(req);
1094 }
1095
1096 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1097 size_t size,
1098 Metaspace::MetadataType mdtype) {
1099 MetaWord* result;
1100
1101 // Inform metaspace OOM to GC heuristics if class unloading is possible.
1102 ShenandoahHeuristics* h = global_generation()->heuristics();
1103 if (h->can_unload_classes()) {
1104 h->record_metaspace_oom();
1105 }
1106
1107 // Expand and retry allocation
1108 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1109 if (result != nullptr) {
1110 return result;
1111 }
1112
1113 // Start full GC
1114 collect(GCCause::_metadata_GC_clear_soft_refs);
1115
1116 // Retry allocation
1117 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1118 if (result != nullptr) {
1119 return result;
1120 }
1121
1122 // Expand and retry allocation
1123 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1124 if (result != nullptr) {
1125 return result;
1126 }
1127 
1128 // Out of memory
1129 return nullptr;
1130 }
1131 
1132 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1133 private:
1134 ShenandoahHeap* const _heap;
1135 Thread* const _thread;
1136 public:
1137 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1138 _heap(heap), _thread(Thread::current()) {}
1139 
1140 void do_object(oop p) {
1141 shenandoah_assert_marked(nullptr, p);
1142 if (!p->is_forwarded()) {
1143 _heap->evacuate_object(p, _thread);
1144 }
1145 }
1146 };
1147 
1148 class ShenandoahEvacuationTask : public WorkerTask {
1149 private:
1150 ShenandoahHeap* const _sh;
1151 ShenandoahCollectionSet* const _cs;
1152 bool _concurrent;
1153 public:
1154 ShenandoahEvacuationTask(ShenandoahHeap* sh,
1155 ShenandoahCollectionSet* cs,
1156 bool concurrent) :
1157 WorkerTask("Shenandoah Evacuation"),
1158 _sh(sh),
1159 _cs(cs),
1160 _concurrent(concurrent)
1161 {}
1162 
1163 void work(uint worker_id) {
1164 if (_concurrent) {
1165 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1166 ShenandoahSuspendibleThreadSetJoiner stsj;
1167 ShenandoahEvacOOMScope oom_evac_scope;
1168 do_work();
1169 } else {
1170 ShenandoahParallelWorkerSession worker_session(worker_id);
1171 ShenandoahEvacOOMScope oom_evac_scope;
1172 do_work();
1173 }
1174 }
1175 
1176 private:
1177 void do_work() {
1178 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1179 ShenandoahHeapRegion* r;
1180 while ((r =_cs->claim_next()) != nullptr) {
1181 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1182 _sh->marked_object_iterate(r, &cl);
1183
1184 if (ShenandoahPacing) {
1185 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1186 }
1187
1188 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1189 break;
1190 }
1191 }
1192 }
1193 };
1194
1195 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1196 private:
1197 bool const _resize;
1198 public:
1199 explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1200 void do_thread(Thread* thread) override {
1201 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1202 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1203 gclab->retire();
1204 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1205 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1206 }
1207
1208 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1209 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1210 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1211
1212       // There are two reasons to retire all PLABs between old-gen evacuation passes:
1213       //  1. We need to make the PLAB memory parsable by remembered-set scanning.
1214       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1215 ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1216 if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1217 ShenandoahThreadLocalData::set_plab_size(thread, 0);
1218 }
1219 }
1220 }
1221 };
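// A usage sketch for the closure above (mirroring gclabs_retire() later in this file): instantiate it once and
// apply it to every Java thread and GC worker thread, e.g.
//   ShenandoahRetireGCLABClosure cl(resize);
//   for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) cl.do_thread(t);
//   workers()->threads_do(&cl);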
1222
1223 class ShenandoahGCStatePropagator : public HandshakeClosure {
1224 public:
1225 explicit ShenandoahGCStatePropagator(char gc_state) :
1226 HandshakeClosure("Shenandoah GC State Change"),
1227 _gc_state(gc_state) {}
1228
1229 void do_thread(Thread* thread) override {
1230 ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1231 }
1232 private:
1233 char _gc_state;
1234 };
1235
1236 class ShenandoahPrepareForUpdateRefs : public HandshakeClosure {
1237 public:
1238 explicit ShenandoahPrepareForUpdateRefs(char gc_state) :
1239 HandshakeClosure("Shenandoah Prepare for Update Refs"),
1240 _retire(ResizeTLAB), _propagator(gc_state) {}
1241
1242 void do_thread(Thread* thread) override {
1243 _propagator.do_thread(thread);
1244 if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1245 _retire.do_thread(thread);
1246 }
1247 }
1248 private:
1249 ShenandoahRetireGCLABClosure _retire;
1250 ShenandoahGCStatePropagator _propagator;
1251 };
1252
1253 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1254 ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1255 workers()->run_task(&task);
1256 }
1257
1258 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1259 {
1260     // Java threads take this lock while they are being attached and added to the list of threads.
1261     // If another thread holds this lock before we update the gc state, it will receive a stale
1262     // gc state, but it will have been added to the list of Java threads and so will be corrected
1263     // by the following handshake.
1264 MutexLocker lock(Threads_lock);
1265
1266 // A cancellation at this point means the degenerated cycle must resume from update-refs.
1267 set_gc_state_concurrent(EVACUATION, false);
1268 set_gc_state_concurrent(WEAK_ROOTS, false);
1269 set_gc_state_concurrent(UPDATE_REFS, true);
1270 }
1271
1272 // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1273 ShenandoahPrepareForUpdateRefs prepare_for_update_refs(_gc_state.raw_value());
1274
1275 // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1276 Threads::non_java_threads_do(&prepare_for_update_refs);
1277
1278 // Now retire gclabs and plabs and propagate gc_state for mutator threads
1279 Handshake::execute(&prepare_for_update_refs);
1280
1281 _update_refs_iterator.reset();
1282 }
1283
1284 class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
1285 HandshakeClosure* _handshake_1;
1286 HandshakeClosure* _handshake_2;
1287 public:
1288 ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
1289 HandshakeClosure(handshake_2->name()),
1290 _handshake_1(handshake_1), _handshake_2(handshake_2) {}
1291
1292 void do_thread(Thread* thread) override {
1293 _handshake_1->do_thread(thread);
1294 _handshake_2->do_thread(thread);
1295 }
1296 };
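// The composite closure above lets a caller piggyback extra per-thread work onto the same handshake as the
// gc-state propagation, avoiding a second global rendezvous. A hedged sketch, mirroring concurrent_final_roots()
// below (my_closure is hypothetical):
//   ShenandoahGCStatePropagator propagator(gc_state);
//   ShenandoahCompositeHandshakeClosure composite(&propagator, &my_closure);
//   Handshake::execute(&composite);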
1297
1298 void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
1299 {
1300 assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
1301 MutexLocker lock(Threads_lock);
1302 set_gc_state_concurrent(WEAK_ROOTS, false);
1303 }
1304
1305 ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
1306 Threads::non_java_threads_do(&propagator);
1307 if (handshake_closure == nullptr) {
1308 Handshake::execute(&propagator);
1309 } else {
1310 ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
1311 Handshake::execute(&composite);
1312 }
1313 }
1314
1315 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1316 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1317 if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1318     // This thread went through the OOM-during-evacuation protocol. It is safe to return
1319     // the forwarding pointer. It must not attempt to evacuate any other objects.
1320 return ShenandoahBarrierSet::resolve_forwarded(p);
1321 }
1322
1323 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1324
1325 ShenandoahHeapRegion* r = heap_region_containing(p);
1326 assert(!r->is_humongous(), "never evacuate humongous objects");
1327
1328 ShenandoahAffiliation target_gen = r->affiliation();
1329 return try_evacuate_object(p, thread, r, target_gen);
1330 }
1331
1332 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1333 ShenandoahAffiliation target_gen) {
1334 assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1335 assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1336 bool alloc_from_lab = true;
1337 HeapWord* copy = nullptr;
1338 size_t size = p->size();
1339
1340 #ifdef ASSERT
1341 if (ShenandoahOOMDuringEvacALot &&
1342 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1343 copy = nullptr;
1344 } else {
1345 #endif
1346 if (UseTLAB) {
1347 copy = allocate_from_gclab(thread, size);
1348 }
1349 if (copy == nullptr) {
1350 // If we failed to allocate in LAB, we'll try a shared allocation.
1351 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1352 copy = allocate_memory(req);
1353 alloc_from_lab = false;
1354 }
1355 #ifdef ASSERT
1356 }
1357 #endif
1358
1359 if (copy == nullptr) {
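    // Allocation for evacuation failed. Tell the control thread, which may begin handling the allocation
    // failure (for example, by degenerating the cycle), then wait in the OOM-during-evacuation protocol,
    // and finally return whatever forwardee is currently published for this object.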
1360 control_thread()->handle_alloc_failure_evac(size);
1361
1362 _oom_evac_handler.handle_out_of_memory_during_evacuation();
1363
1364 return ShenandoahBarrierSet::resolve_forwarded(p);
1365 }
1366
1367 // Copy the object:
1368 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1369
1370 // Try to install the new forwarding pointer.
1371 oop copy_val = cast_to_oop(copy);
1372 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1373 if (result == copy_val) {
1374 // Successfully evacuated. Our copy is now the public one!
1375 ContinuationGCSupport::relativize_stack_chunk(copy_val);
1376 shenandoah_assert_correct(nullptr, copy_val);
1377 return copy_val;
1378 } else {
1379 // Failed to evacuate. We need to deal with the object that is left behind. Since this
1380 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1381 // But if it happens to contain references to evacuated regions, those references would
1382     // not get updated for this stale copy during this cycle, and we would crash while scanning
1383     // it in the next cycle.
1384 if (alloc_from_lab) {
1385 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1386 // object will overwrite this stale copy, or the filler object on LAB retirement will
1387 // do this.
1388 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1389 } else {
1390 // For non-LAB allocations, we have no way to retract the allocation, and
1391 // have to explicitly overwrite the copy with the filler object. With that overwrite,
1392 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1393 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1394 fill_with_object(copy, size);
1395 shenandoah_assert_correct(nullptr, copy_val);
1396 // For non-LAB allocations, the object has already been registered
1397 }
1398 shenandoah_assert_correct(nullptr, result);
1399 return result;
1400 }
1401 }
1402
1403 void ShenandoahHeap::trash_cset_regions() {
1404 ShenandoahHeapLocker locker(lock());
1405
1406 ShenandoahCollectionSet* set = collection_set();
1407 ShenandoahHeapRegion* r;
1408 set->clear_current_index();
1409 while ((r = set->next()) != nullptr) {
1410 r->make_trash();
1411 }
1412 collection_set()->clear();
1413 }
1414
1415 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1416 st->print_cr("Heap Regions:");
1417 st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1418 st->print_cr(" HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1419 st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1420 st->print_cr("UWM=update watermark, U=used");
1421 st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1422 st->print_cr("S=shared allocs, L=live data");
1423 st->print_cr("CP=critical pins");
1424
1425 for (size_t i = 0; i < num_regions(); i++) {
1426 get_region(i)->print_on(st);
1427 }
1428 }
1429
1430 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1431 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1432
1433 oop humongous_obj = cast_to_oop(start->bottom());
1434 size_t size = humongous_obj->size();
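  // required_regions() rounds the humongous object's size in bytes up to a whole number of heap regions;
  // the last of those regions is reclaimed first (see the loop below).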
1435 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1436 size_t index = start->index() + required_regions - 1;
1437
1438 assert(!start->has_live(), "liveness must be zero");
1439
1440   for (size_t i = 0; i < required_regions; i++) {
1441     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1442     // because it expects every region to belong to a humongous sequence that starts with a humongous start region.
1443     ShenandoahHeapRegion* region = get_region(index--);
1444
1445 assert(region->is_humongous(), "expect correct humongous start or continuation");
1446 assert(!region->is_cset(), "Humongous region should not be in collection set");
1447
1448 region->make_trash_immediate();
1449 }
1450 return required_regions;
1451 }
1452
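// Debug-only sanity closure: asserts that GCLABs (and, in generational mode, PLABs) have already been retired
// and need no further retirement. Used under ASSERT in tlabs_retire() below.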
1453 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1454 public:
1455 ShenandoahCheckCleanGCLABClosure() {}
1456   void do_thread(Thread* thread) override {
1457 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1458 assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1459 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1460
1461 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1462 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1463 assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1464 assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1465 }
1466 }
1467 };
1468
1469 void ShenandoahHeap::labs_make_parsable() {
1470 assert(UseTLAB, "Only call with UseTLAB");
1471
1472 ShenandoahRetireGCLABClosure cl(false);
1473
1474 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1475 ThreadLocalAllocBuffer& tlab = t->tlab();
1476 tlab.make_parsable();
1477 cl.do_thread(t);
1478 }
1479
1480 workers()->threads_do(&cl);
1481
1482 if (safepoint_workers() != nullptr) {
1483 safepoint_workers()->threads_do(&cl);
1484 }
1485 }
1486
1487 void ShenandoahHeap::tlabs_retire(bool resize) {
1488 assert(UseTLAB, "Only call with UseTLAB");
1489 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1490
1491 ThreadLocalAllocStats stats;
1492
1493 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1494 ThreadLocalAllocBuffer& tlab = t->tlab();
1495 tlab.retire(&stats);
1496 if (resize) {
1497 tlab.resize();
1498 }
1499 }
1500
1501 stats.publish();
1502
1503 #ifdef ASSERT
1504 ShenandoahCheckCleanGCLABClosure cl;
1505 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1506 cl.do_thread(t);
1507 }
1508 workers()->threads_do(&cl);
1509 #endif
1510 }
1511
1512 void ShenandoahHeap::gclabs_retire(bool resize) {
1513 assert(UseTLAB, "Only call with UseTLAB");
1514 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1515
1516 ShenandoahRetireGCLABClosure cl(resize);
1517 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1518 cl.do_thread(t);
1519 }
1520
1521 workers()->threads_do(&cl);
1522
1523 if (safepoint_workers() != nullptr) {
1524 safepoint_workers()->threads_do(&cl);
1525 }
1526 }
1527
1528 // Returns size in bytes
1529 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1530   // Return the max allowed size, and let the allocation path
1531   // figure out the safe size for the current allocation.
1532 return ShenandoahHeapRegion::max_tlab_size_bytes();
1533 }
1534
1535 size_t ShenandoahHeap::max_tlab_size() const {
1536 // Returns size in words
1537 return ShenandoahHeapRegion::max_tlab_size_words();
1538 }
1539
1540 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1564 }
1565 return nullptr;
1566 }
1567
1568 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1569 ShenandoahHeapRegion* r = heap_region_containing(addr);
1570 return r->block_is_obj(addr);
1571 }
1572
1573 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1574 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1575 }
1576
1577 void ShenandoahHeap::prepare_for_verify() {
1578 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1579 labs_make_parsable();
1580 }
1581 }
1582
1583 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1584 if (_shenandoah_policy->is_at_shutdown()) {
1585 return;
1586 }
1587
1588 if (_control_thread != nullptr) {
1589 tcl->do_thread(_control_thread);
1590 }
1591
1592 if (_uncommit_thread != nullptr) {
1593 tcl->do_thread(_uncommit_thread);
1594 }
1595
1596 workers()->threads_do(tcl);
1597 if (_safepoint_workers != nullptr) {
1598 _safepoint_workers->threads_do(tcl);
1599 }
1600 }
1601
1602 void ShenandoahHeap::print_tracing_info() const {
1603 LogTarget(Info, gc, stats) lt;
1604 if (lt.is_enabled()) {
1605 ResourceMark rm;
1606 LogStream ls(lt);
1607
1608 phase_timings()->print_global_on(&ls);
1609
1610 ls.cr();
1611 ls.cr();
1612
1613 shenandoah_policy()->print_gc_stats(&ls);
1614
1615 ls.cr();
1616 ls.cr();
1617 }
1618 }
1619
1620 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1621 shenandoah_assert_control_or_vm_thread_at_safepoint();
1622 _gc_generation = generation;
1623 }
1624
1625 // Active generation may only be set by the VM thread at a safepoint.
1626 void ShenandoahHeap::set_active_generation() {
1627 assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1628 assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1629 assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1630 _active_generation = _gc_generation;
1631 }
1632
1633 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1634 shenandoah_policy()->record_collection_cause(cause);
1635
1636 const GCCause::Cause current = gc_cause();
1637 assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1638 GCCause::to_string(current), GCCause::to_string(cause));
1639 assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1640
1641 set_gc_cause(cause);
1642 set_gc_generation(generation);
1643
1644 generation->heuristics()->record_cycle_start();
1645 }
1646
1647 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1648 assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1649 assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1650
1651 generation->heuristics()->record_cycle_end();
1652 if (mode()->is_generational() && generation->is_global()) {
1653 // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1654 young_generation()->heuristics()->record_cycle_end();
1655 old_generation()->heuristics()->record_cycle_end();
1656 }
1657
1658 set_gc_generation(nullptr);
1659 set_gc_cause(GCCause::_no_gc);
1660 }
1661
1662 void ShenandoahHeap::verify(VerifyOption vo) {
1663 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1664 if (ShenandoahVerify) {
1665 verifier()->verify_generic(vo);
1666 } else {
1667 // TODO: Consider allocating verification bitmaps on demand,
1668 // and turn this on unconditionally.
1669 }
1670 }
1671 }
1672 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1673 return _free_set->capacity();
1674 }
1675
1676 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1677 private:
1678 MarkBitMap* _bitmap;
1679 ShenandoahScanObjectStack* _oop_stack;
1680 ShenandoahHeap* const _heap;
1681 ShenandoahMarkingContext* const _marking_context;
1976 const uint active_workers = workers()->active_workers();
1977 const size_t n_regions = num_regions();
1978 size_t stride = ShenandoahParallelRegionStride;
1979 if (stride == 0 && active_workers > 1) {
1980     // Automatically derive the stride to balance the work between threads
1981     // evenly. Do not try to split the work if below a reasonable threshold.
1982 constexpr size_t threshold = 4096;
1983 stride = n_regions <= threshold ?
1984 threshold :
1985 (n_regions + active_workers - 1) / active_workers;
1986 }
1987
1988 if (n_regions > stride && active_workers > 1) {
1989 ShenandoahParallelHeapRegionTask task(blk, stride);
1990 workers()->run_task(&task);
1991 } else {
1992 heap_region_iterate(blk);
1993 }
1994 }
1995
1996 class ShenandoahRendezvousClosure : public HandshakeClosure {
1997 public:
1998 inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1999 inline void do_thread(Thread* thread) {}
2000 };
2001
2002 void ShenandoahHeap::rendezvous_threads() {
2003 ShenandoahRendezvousClosure cl;
2004 Handshake::execute(&cl);
2005 }
2006
2007 void ShenandoahHeap::recycle_trash() {
2008 free_set()->recycle_trash();
2009 }
2010
2011 void ShenandoahHeap::do_class_unloading() {
2012 _unloader.unload();
2013 if (mode()->is_generational()) {
2014 old_generation()->set_parsable(false);
2015 }
2016 }
2017
2018 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2019 // Weak refs processing
2020 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2021 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2022 ShenandoahTimingsTracker t(phase);
2023 ShenandoahGCWorkerPhase worker_phase(phase);
2024 shenandoah_assert_generations_reconciled();
2025 gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2026 }
2027
2028 void ShenandoahHeap::prepare_update_heap_references() {
2029 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2030
2031 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2032 // make them parsable for update code to work correctly. Plus, we can compute new sizes
2033 // for future GCLABs here.
2034 if (UseTLAB) {
2035 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2036 gclabs_retire(ResizeTLAB);
2037 }
2038
2039 _update_refs_iterator.reset();
2040 }
2041
2042 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2043 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2044 if (_gc_state_changed) {
2045 ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
2046 Threads::threads_do(&propagator);
2047 _gc_state_changed = false;
2048 }
2049 }
2050
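// Note the two propagation paths for the gc state: at a safepoint, propagate_gc_state_to_all_threads() above
// walks every thread directly; outside safepoints, set_gc_state_concurrent() below relies on the Threads_lock
// plus a subsequent handshake (see concurrent_prepare_for_update_refs()).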
2051 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2052 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2053 _gc_state.set_cond(mask, value);
2054 _gc_state_changed = true;
2055 }
2056
2057 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
2058   // Holding the thread lock here ensures that any thread created after we change the gc
2059   // state will have the correct state. It also prevents attaching threads from seeing
2060   // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
2061   // threads will use their thread-local copy of the gc state (changed by a handshake, or at a
2062   // safepoint).
2063 assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
2064 _gc_state.set_cond(mask, value);
2065 }
2066
2067 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2068 uint mask;
2069 assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2070 if (!in_progress && is_concurrent_old_mark_in_progress()) {
2071 assert(mode()->is_generational(), "Only generational GC has old marking");
2072 assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2073 // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2074 mask = YOUNG_MARKING;
2075 } else {
2076 mask = MARKING | YOUNG_MARKING;
2077 }
2078 set_gc_state_at_safepoint(mask, in_progress);
2079 manage_satb_barrier(in_progress);
2080 }
2081
2082 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2083 #ifdef ASSERT
2084 // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2085 bool has_forwarded = has_forwarded_objects();
2086 bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2087 bool evacuating = _gc_state.is_set(EVACUATION);
2088 assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2089 "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2090 #endif
2091 if (!in_progress && is_concurrent_young_mark_in_progress()) {
2092 // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2093 assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2094 set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2095 } else {
2096 set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2097 }
2098 manage_satb_barrier(in_progress);
2099 }
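// Invariant maintained by the two setters above: the aggregate MARKING bit stays set as long as either
// YOUNG_MARKING or OLD_MARKING is set, so checks keyed off MARKING remain true across overlapping young
// and old marking phases.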
2100
2101 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2102 return old_generation()->is_preparing_for_mark();
2103 }
2104
2105 void ShenandoahHeap::manage_satb_barrier(bool active) {
2106 if (is_concurrent_mark_in_progress()) {
2107 // Ignore request to deactivate barrier while concurrent mark is in progress.
2108 // Do not attempt to re-activate the barrier if it is already active.
2109 if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
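      // The second argument is the expected current activation state; the queue set can use it to
      // verify this is a genuine toggle.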
2110 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2111 }
2112 } else {
2113     // No concurrent marking is in progress, so honor the request to deactivate,
2114     // but only if the barrier is currently active.
2115 if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2116 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2117 }
2118 }
2119 }
2120
2121 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2122 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2123 set_gc_state_at_safepoint(EVACUATION, in_progress);
2124 }
2125
2126 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2127 if (in_progress) {
2128 _concurrent_strong_root_in_progress.set();
2129 } else {
2130 _concurrent_strong_root_in_progress.unset();
2131 }
2132 }
2133
2134 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2135 set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2136 }
2137
2138 GCTracer* ShenandoahHeap::tracer() {
2139 return shenandoah_policy()->tracer();
2140 }
2141
2142 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2143 return _free_set->used();
2144 }
2145
2146 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
2147 const GCCause::Cause prev = _cancelled_gc.xchg(cause);
2148 return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
2149 }
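// try_cancel_gc() atomically swaps in the new cause; it reports success only when the previous value showed no
// pending cancellation (or only an internal concurrent-gc cancellation), so a racing requester observes the
// earlier cause and loses.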
2150
2151 void ShenandoahHeap::cancel_concurrent_mark() {
2152 if (mode()->is_generational()) {
2153 young_generation()->cancel_marking();
2154 old_generation()->cancel_marking();
2155 }
2156
2157 global_generation()->cancel_marking();
2158
2159 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2160 }
2161
2162 bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2163 if (try_cancel_gc(cause)) {
2164 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2165 log_info(gc,thread)("%s", msg.buffer());
2166 Events::log(Thread::current(), "%s", msg.buffer());
2167 _cancel_requested_time = os::elapsedTime();
2168 return true;
2169 }
2170 return false;
2171 }
2172
2173 uint ShenandoahHeap::max_workers() {
2174 return _max_workers;
2175 }
2176
2177 void ShenandoahHeap::stop() {
2178   // The shutdown sequence should be able to terminate even when GC is running.
2179
2180 // Step 0. Notify policy to disable event recording.
2181 _shenandoah_policy->record_shutdown();
2182
2183   // Step 1. Stop reporting on GC thread CPU utilization.
2184 mmu_tracker()->stop();
2185
2186 // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC).
2187 control_thread()->stop();
2188
2189   // Step 3. Stop the uncommit thread.
2190 if (_uncommit_thread != nullptr) {
2191 _uncommit_thread->stop();
2192 }
2193 }
2194
2195 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2196 if (!unload_classes()) return;
2197 ClassUnloadingContext ctx(_workers->active_workers(),
2198 true /* unregister_nmethods_during_purge */,
2199 false /* lock_codeblob_free_separately */);
2200
2201 // Unload classes and purge SystemDictionary.
2202 {
2203 ShenandoahPhaseTimings::Phase phase = full_gc ?
2204 ShenandoahPhaseTimings::full_gc_purge_class_unload :
2205 ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2206 ShenandoahIsAliveSelector is_alive;
2207 {
2208 CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2209 ShenandoahGCPhase gc_phase(phase);
2210 ShenandoahGCWorkerPhase worker_phase(phase);
2211 bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2212
2213 uint num_workers = _workers->active_workers();
2214 ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2215 _workers->run_task(&unlink_task);
2216 }
2217     // Release unloaded nmethods' memory.
2218 ClassUnloadingContext::context()->purge_and_free_nmethods();
2219 }
2220
2221 {
2222 ShenandoahGCPhase phase(full_gc ?
2223 ShenandoahPhaseTimings::full_gc_purge_cldg :
2224 ShenandoahPhaseTimings::degen_gc_purge_cldg);
2225 ClassLoaderDataGraph::purge(true /* at_safepoint */);
2226 }
2227 // Resize and verify metaspace
2228 MetaspaceGC::compute_new_size();
2229 DEBUG_ONLY(MetaspaceUtils::verify();)
2230 }
2231
2232 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2233 // so they should not have forwarded oops.
2234 // However, we do need to "null" dead oops in the roots, if that cannot be done
2235 // in concurrent cycles.
2236 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2237 uint num_workers = _workers->active_workers();
2238 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2239 ShenandoahPhaseTimings::full_gc_purge_weak_par :
2240 ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2241 ShenandoahGCPhase phase(timing_phase);
2242 ShenandoahGCWorkerPhase worker_phase(timing_phase);
2243 // Cleanup weak roots
2244 if (has_forwarded_objects()) {
2245 ShenandoahForwardedIsAliveClosure is_alive;
2246 ShenandoahUpdateRefsClosure keep_alive;
2247 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2248 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2249 _workers->run_task(&cleaning_task);
2250 } else {
2251 ShenandoahIsAliveClosure is_alive;
2252 #ifdef ASSERT
2256 #else
2257 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2258 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2259 #endif
2260 _workers->run_task(&cleaning_task);
2261 }
2262 }
2263
2264 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2265 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2266 assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2267 ShenandoahGCPhase phase(full_gc ?
2268 ShenandoahPhaseTimings::full_gc_purge :
2269 ShenandoahPhaseTimings::degen_gc_purge);
2270 stw_weak_refs(full_gc);
2271 stw_process_weak_roots(full_gc);
2272 stw_unload_classes(full_gc);
2273 }
2274
2275 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2276 set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2277 }
2278
2279 void ShenandoahHeap::set_unload_classes(bool uc) {
2280 _unload_classes.set_cond(uc);
2281 }
2282
2283 bool ShenandoahHeap::unload_classes() const {
2284 return _unload_classes.is_set();
2285 }
2286
2287 address ShenandoahHeap::in_cset_fast_test_addr() {
2288 ShenandoahHeap* heap = ShenandoahHeap::heap();
2289 assert(heap->collection_set() != nullptr, "Sanity");
2290 return (address) heap->collection_set()->biased_map_address();
2291 }
2292
2293 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2294 if (mode()->is_generational()) {
2295 young_generation()->reset_bytes_allocated_since_gc_start();
2296 old_generation()->reset_bytes_allocated_since_gc_start();
2297 }
2298
2299 global_generation()->reset_bytes_allocated_since_gc_start();
2300 }
2301
2302 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2303 _degenerated_gc_in_progress.set_cond(in_progress);
2304 }
2305
2306 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2307 _full_gc_in_progress.set_cond(in_progress);
2308 }
2309
2310 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2311 assert (is_full_gc_in_progress(), "should be");
2312 _full_gc_move_in_progress.set_cond(in_progress);
2313 }
2314
2315 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2316 set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2317 }
2318
2319 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2320 ShenandoahCodeRoots::register_nmethod(nm);
2321 }
2322
2323 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2324 ShenandoahCodeRoots::unregister_nmethod(nm);
2325 }
2326
2327 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2328 heap_region_containing(o)->record_pin();
2329 }
2330
2331 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2332 ShenandoahHeapRegion* r = heap_region_containing(o);
2333 assert(r != nullptr, "Sanity");
2334 assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2335 r->record_unpin();
2336 }
2343 if (r->is_active()) {
2344 if (r->is_pinned()) {
2345 if (r->pin_count() == 0) {
2346 r->make_unpinned();
2347 }
2348 } else {
2349 if (r->pin_count() > 0) {
2350 r->make_pinned();
2351 }
2352 }
2353 }
2354 }
2355
2356 assert_pinned_region_status();
2357 }
2358
2359 #ifdef ASSERT
2360 void ShenandoahHeap::assert_pinned_region_status() {
2361 for (size_t i = 0; i < num_regions(); i++) {
2362 ShenandoahHeapRegion* r = get_region(i);
2363 shenandoah_assert_generations_reconciled();
2364 if (gc_generation()->contains(r)) {
2365 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2366 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2367 }
2368 }
2369 }
2370 #endif
2371
2372 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2373 return _gc_timer;
2374 }
2375
2376 void ShenandoahHeap::prepare_concurrent_roots() {
2377 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2378 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2379 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2380 set_concurrent_weak_root_in_progress(true);
2381 if (unload_classes()) {
2382 _unloader.prepare();
2383 }
2384 }
2385
2386 void ShenandoahHeap::finish_concurrent_roots() {
2387 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2388 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2389 if (unload_classes()) {
2390 _unloader.finish();
2391 }
2392 }
2393
2394 #ifdef ASSERT
2395 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2396 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2397
2398 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2399 // Use ParallelGCThreads inside safepoints
2400 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2401 ParallelGCThreads, nworkers);
2402 } else {
2403 // Use ConcGCThreads outside safepoints
2404     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2405 ConcGCThreads, nworkers);
2406 }
2407 }
2408 #endif
2409
2410 ShenandoahVerifier* ShenandoahHeap::verifier() {
2411 guarantee(ShenandoahVerify, "Should be enabled");
2412 assert (_verifier != nullptr, "sanity");
2413 return _verifier;
2414 }
2415
2416 template<bool CONCURRENT>
2417 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2418 private:
2419 ShenandoahHeap* _heap;
2420 ShenandoahRegionIterator* _regions;
2421 public:
2422 explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2423 WorkerTask("Shenandoah Update References"),
2424 _heap(ShenandoahHeap::heap()),
2425 _regions(regions) {
2426 }
2427
2428 void work(uint worker_id) {
2429 if (CONCURRENT) {
2430 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2431 ShenandoahSuspendibleThreadSetJoiner stsj;
2432 do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2433 } else {
2434 ShenandoahParallelWorkerSession worker_session(worker_id);
2435 do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2436 }
2437 }
2438
2439 private:
2440 template<class T>
2441 void do_work(uint worker_id) {
2442 if (CONCURRENT && (worker_id == 0)) {
2443 // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2444 // results of evacuation. These reserves are no longer necessary because evacuation has completed.
2445 size_t cset_regions = _heap->collection_set()->count();
2446
2447 // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2448 // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2449 // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2450 // next GC cycle.
2451 _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2452 }
2453 // If !CONCURRENT, there's no value in expanding Mutator free set
2454 T cl;
2455 ShenandoahHeapRegion* r = _regions->next();
2456 while (r != nullptr) {
2457 HeapWord* update_watermark = r->get_update_watermark();
2458 assert (update_watermark >= r->bottom(), "sanity");
2459 if (r->is_active() && !r->is_cset()) {
2460 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2461 if (ShenandoahPacing) {
2462 _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
2463 }
2464 }
2465 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2466 return;
2467 }
2468 r = _regions->next();
2469 }
2470 }
2471 };
2472
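// The CONCURRENT template parameter above selects the closure flavor (ShenandoahConcUpdateRefsClosure vs.
// ShenandoahSTWUpdateRefsClosure) and the suspendible-thread-set handling at compile time, presumably to keep
// the per-region update loop free of runtime branching.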
2473 void ShenandoahHeap::update_heap_references(bool concurrent) {
2474 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2475
2476 if (concurrent) {
2477 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2478 workers()->run_task(&task);
2479 } else {
2480 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2481 workers()->run_task(&task);
2482 }
2483 }
2484
2485 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2486 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2487 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2488
2489 {
2490 ShenandoahGCPhase phase(concurrent ?
2491 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2492 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2493
2494 final_update_refs_update_region_states();
2495
2496 assert_pinned_region_status();
2497 }
2498
2499 {
2500 ShenandoahGCPhase phase(concurrent ?
2501 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2502 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2503 trash_cset_regions();
2504 }
2505 }
2506
2507 void ShenandoahHeap::final_update_refs_update_region_states() {
2508 ShenandoahSynchronizePinnedRegionStates cl;
2509 parallel_heap_region_iterate(&cl);
2510 }
2511
2512 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2513 ShenandoahGCPhase phase(concurrent ?
2514 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2515 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2516 ShenandoahHeapLocker locker(lock());
2517 size_t young_cset_regions, old_cset_regions;
2518 size_t first_old_region, last_old_region, old_region_count;
2519 _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2520 // If there are no old regions, first_old_region will be greater than last_old_region
2521 assert((first_old_region > last_old_region) ||
2522 ((last_old_region + 1 - first_old_region >= old_region_count) &&
2523 get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2524 "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2525 old_region_count, first_old_region, last_old_region);
2526
2527 if (mode()->is_generational()) {
2528 #ifdef ASSERT
2529 if (ShenandoahVerify) {
2530 verifier()->verify_before_rebuilding_free_set();
2531 }
2532 #endif
2533
2534     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so consider all of this
2535 // available for transfer to old. Note that transfer of humongous regions does not impact available.
2536 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2537 size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2538 gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2539
2540 // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
2541 // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
2542 // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
2543 // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2544 //
2545 // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2546 // within partially consumed regions of memory.
2547 }
2548 // Rebuild free set based on adjusted generation sizes.
2549 _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2550
2551 if (mode()->is_generational()) {
2552 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2553 ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2554 old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2555 }
2556 }
2557
2558 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2559 print_on(st);
2560 st->cr();
2561 print_heap_regions_on(st);
2562 }
2563
2564 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2565 size_t slice = r->index() / _bitmap_regions_per_slice;
2566
2567 size_t regions_from = _bitmap_regions_per_slice * slice;
2568 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2569 for (size_t g = regions_from; g < regions_to; g++) {
2570 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2571 if (skip_self && g == r->index()) continue;
2572 if (get_region(g)->is_committed()) {
2573 return true;
2574 }
2600 return false;
2601 }
2602
2603 if (AlwaysPreTouch) {
2604 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2605 }
2606
2607 return true;
2608 }
2609
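// Marking bitmap memory is committed in slices that cover _bitmap_regions_per_slice regions each, so a slice
// may be uncommitted only when no region mapping to it is committed; is_bitmap_slice_committed() above performs
// that check (with skip_self to ignore the region currently being uncommitted).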
2610 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2611 shenandoah_assert_heaplocked();
2612
2613 // Bitmaps in special regions do not need uncommits
2614 if (_bitmap_region_special) {
2615 return true;
2616 }
2617
2618 if (is_bitmap_slice_committed(r, true)) {
2619     // Some other region from the group is still committed, meaning the bitmap
2620     // slice should stay committed; exit right away.
2621 return true;
2622 }
2623
2624 // Uncommit the bitmap slice:
2625 size_t slice = r->index() / _bitmap_regions_per_slice;
2626 size_t off = _bitmap_bytes_per_slice * slice;
2627 size_t len = _bitmap_bytes_per_slice;
2628 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2629 return false;
2630 }
2631 return true;
2632 }
2633
2634 void ShenandoahHeap::forbid_uncommit() {
2635 if (_uncommit_thread != nullptr) {
2636 _uncommit_thread->forbid_uncommit();
2637 }
2638 }
2639
2640 void ShenandoahHeap::allow_uncommit() {
2641 if (_uncommit_thread != nullptr) {
2642 _uncommit_thread->allow_uncommit();
2643 }
2644 }
2645
2646 #ifdef ASSERT
2647 bool ShenandoahHeap::is_uncommit_in_progress() {
2648 if (_uncommit_thread != nullptr) {
2649 return _uncommit_thread->is_uncommit_in_progress();
2650 }
2651 return false;
2652 }
2653 #endif
2654
2655 void ShenandoahHeap::safepoint_synchronize_begin() {
2656 SuspendibleThreadSet::synchronize();
2657 }
2658
2659 void ShenandoahHeap::safepoint_synchronize_end() {
2660 SuspendibleThreadSet::desynchronize();
2661 }
2662
2663 void ShenandoahHeap::try_inject_alloc_failure() {
2664 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2665 _inject_alloc_failure.set();
2666 os::naked_short_sleep(1);
2667 if (cancelled_gc()) {
2668 log_info(gc)("Allocation failure was successfully injected");
2669 }
2670 }
2671 }
2672
2673 bool ShenandoahHeap::should_inject_alloc_failure() {
2674 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2675 }
2676
2677 void ShenandoahHeap::initialize_serviceability() {
2678 _memory_pool = new ShenandoahMemoryPool(this);
2679 _cycle_memory_manager.add_pool(_memory_pool);
2680 _stw_memory_manager.add_pool(_memory_pool);
2681 }
2682
2683 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2684 GrowableArray<GCMemoryManager*> memory_managers(2);
2685 memory_managers.append(&_cycle_memory_manager);
2686 memory_managers.append(&_stw_memory_manager);
2687 return memory_managers;
2688 }
2689
2690 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2691 GrowableArray<MemoryPool*> memory_pools(1);
2692 memory_pools.append(_memory_pool);
2693 return memory_pools;
2694 }
2695
2696 MemoryUsage ShenandoahHeap::memory_usage() {
2697 return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2698 }
2699
2700 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2701 _heap(ShenandoahHeap::heap()),
2702 _index(0) {}
2703
2704 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2705 _heap(heap),
2706 _index(0) {}
2707
2708 void ShenandoahRegionIterator::reset() {
2709 _index = 0;
2710 }
2711
2712 bool ShenandoahRegionIterator::has_next() const {
2713 return _index < _heap->num_regions();
2714 }
2715
2716 char ShenandoahHeap::gc_state() const {
2717 return _gc_state.raw_value();
2718 }
2719
2720 bool ShenandoahHeap::is_gc_state(GCState state) const {
2721 // If the global gc state has been changed, but hasn't yet been propagated to all threads, then
2722 // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
2723 // _gc_state_changed will be toggled to false and we need to use the thread local state.
2724 return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
2725 }
2726
2727
2728 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2729 #ifdef ASSERT
2730 assert(_liveness_cache != nullptr, "sanity");
2731 assert(worker_id < _max_workers, "sanity");
2732 for (uint i = 0; i < num_regions(); i++) {
2733 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2734 }
2735 #endif
2736 return _liveness_cache[worker_id];
2737 }
2738
2739 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2740 assert(worker_id < _max_workers, "sanity");
2741 assert(_liveness_cache != nullptr, "sanity");
2742 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2743 for (uint i = 0; i < num_regions(); i++) {
2744 ShenandoahLiveData live = ld[i];
2745 if (live > 0) {
2746 ShenandoahHeapRegion* r = get_region(i);
2747 r->increase_live_data_gc_words(live);
2750 }
2751 }
2752
2753 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2754 if (is_idle()) return false;
2755
2756   // Objects allocated after marking starts are implicitly alive and don't need any barriers during
2757   // the marking phase.
2758 if (is_concurrent_mark_in_progress() &&
2759 !marking_context()->allocated_after_mark_start(obj)) {
2760 return true;
2761 }
2762
2763   // Cannot guarantee that obj is deeply good (i.e., that everything it references has already been updated).
2764 if (has_forwarded_objects()) {
2765 return true;
2766 }
2767
2768 return false;
2769 }
2770
2771 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2772 if (!mode()->is_generational()) {
2773 return global_generation();
2774 } else if (affiliation == YOUNG_GENERATION) {
2775 return young_generation();
2776 } else if (affiliation == OLD_GENERATION) {
2777 return old_generation();
2778 }
2779
2780 ShouldNotReachHere();
2781 return nullptr;
2782 }
2783
2784 void ShenandoahHeap::log_heap_status(const char* msg) const {
2785 if (mode()->is_generational()) {
2786 young_generation()->log_status(msg);
2787 old_generation()->log_status(msg);
2788 } else {
2789 global_generation()->log_status(msg);
2790 }
2791 }
|