/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRegulatorThread.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/events.hpp"

class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
public:
  static void print() {
    ShenandoahGenerationalInitLogger logger;
    logger.print_all();
  }
protected:
  void print_gc_specific() override {
    ShenandoahInitLogger::print_gc_specific();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
    log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
  }
};

size_t ShenandoahGenerationalHeap::calculate_min_plab() {
  return align_up(PLAB::min_size(), CardTable::card_size_in_words());
}

size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
  return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
}
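
// A worked illustration of the card alignment above (hypothetical values;
// the real ones depend on platform and flags): with 512-byte cards and
// 8-byte heap words, CardTable::card_size_in_words() is 64. If
// PLAB::min_size() returned 130 words, calculate_min_plab() would round up
// to 192 words; if max_tlab_size_words() returned 131000 words,
// calculate_max_plab() would round down to 130944. Keeping both bounds
// card-aligned lets whole PLABs start and end on card boundaries, so
// card-table updates for adjacent PLABs never share a card.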

// Returns size in bytes
size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc() const {
  return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
}

ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  ShenandoahHeap(policy),
  _age_census(nullptr),
  _min_plab_size(calculate_min_plab()),
  _max_plab_size(calculate_max_plab()),
  _regulator_thread(nullptr),
  _young_gen_memory_pool(nullptr),
  _old_gen_memory_pool(nullptr) {
  assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
}

void ShenandoahGenerationalHeap::post_initialize() {
  ShenandoahHeap::post_initialize();
  _age_census = new ShenandoahAgeCensus();
}

void ShenandoahGenerationalHeap::print_init_logger() const {
  ShenandoahGenerationalInitLogger logger;
  logger.print_all();
}

void ShenandoahGenerationalHeap::initialize_heuristics() {
  // Initialize global generation and heuristics even in generational mode.
  ShenandoahHeap::initialize_heuristics();

  // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
  // for old would be total heap - minimum capacity of young. This means the sum of the maximum
  // allowed for old and young could exceed the total heap size. It remains the case that the
  // _actual_ capacity of young + old = total.
  size_t region_count = num_regions();
  size_t max_young_regions = MAX2((region_count * ShenandoahMaxYoungPercentage) / 100, (size_t) 1U);
  size_t initial_capacity_young = max_young_regions * ShenandoahHeapRegion::region_size_bytes();
  size_t max_capacity_young = initial_capacity_young;
  size_t initial_capacity_old = max_capacity() - max_capacity_young;
  size_t max_capacity_old = max_capacity() - initial_capacity_young;

  _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young, initial_capacity_young);
  _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old, initial_capacity_old);
  _young_generation->initialize_heuristics(mode());
  _old_generation->initialize_heuristics(mode());
}
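
// Sizing illustration (hypothetical configuration): with 1000 regions and
// ShenandoahMaxYoungPercentage=90, young is capped at 900 regions, and old's
// maximum is the remaining 100 regions' worth of heap. Since initial and
// maximum young capacity coincide here, the two maxima exactly partition the
// heap; the comment above describes the general case, where the maxima may
// sum to more than the total while the actual capacities always sum to it.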

void ShenandoahGenerationalHeap::post_initialize_heuristics() {
  ShenandoahHeap::post_initialize_heuristics();
  _young_generation->post_initialize(this);
  _old_generation->post_initialize(this);
}

void ShenandoahGenerationalHeap::initialize_serviceability() {
  assert(mode()->is_generational(), "Only for the generational mode");
  _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
  _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
  cycle_memory_manager()->add_pool(_young_gen_memory_pool);
  cycle_memory_manager()->add_pool(_old_gen_memory_pool);
  stw_memory_manager()->add_pool(_young_gen_memory_pool);
  stw_memory_manager()->add_pool(_old_gen_memory_pool);
}

GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
  assert(mode()->is_generational(), "Only for the generational mode");
  GrowableArray<MemoryPool*> memory_pools(2);
  memory_pools.append(_young_gen_memory_pool);
  memory_pools.append(_old_gen_memory_pool);
  return memory_pools;
}

void ShenandoahGenerationalHeap::initialize_controller() {
  auto control_thread = new ShenandoahGenerationalControlThread();
  _control_thread = control_thread;
  _regulator_thread = new ShenandoahRegulatorThread(control_thread);
}

void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (!shenandoah_policy()->is_at_shutdown()) {
    ShenandoahHeap::gc_threads_do(tcl);
    tcl->do_thread(regulator_thread());
  }
}

void ShenandoahGenerationalHeap::stop() {
  ShenandoahHeap::stop();
  regulator_thread()->stop();
}

bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) {
    return false;
  }

  if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
    // We are marking young, this object is in young, and it is below the TAMS
    return true;
  }

  if (is_in_old(obj)) {
    // Card marking barriers are required for objects in the old generation
    return true;
  }

  if (has_forwarded_objects()) {
    // Object may have pointers that need to be updated
    return true;
  }

  return false;
}

void ShenandoahGenerationalHeap::evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, generation, &regions, concurrent, false /* only promote regions */);
  workers()->run_task(&task);
}

void ShenandoahGenerationalHeap::promote_regions_in_place(ShenandoahGeneration* generation, bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, generation, &regions, concurrent, true /* only promote regions */);
  workers()->run_task(&task);
}

oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate anymore.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahAffiliation target_gen = r->affiliation();
  // gc_generation() can change asynchronously and should not be used here.
  assert(active_generation() != nullptr, "Error");
  if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }

    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (age_census()->is_tenurable(r->age() + mark.age())) {
      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
      if (result != nullptr) {
        return result;
      }
      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
    }
  }
  return try_evacuate_object(p, thread, r, target_gen);
}
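
// Tenuring illustration for evacuate_object() above (hypothetical numbers,
// and assuming is_tenurable() compares the combined age against the census'
// tenuring threshold): with a threshold of 7, an object of age 5 in a region
// of age 2 is promoted to OLD_GENERATION; the same object in an age-0 region
// is re-evacuated within young, gaining another cycle of age if this is an
// aging cycle.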

// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                                    ShenandoahAffiliation target_gen) {
  bool alloc_from_lab = true;
  bool has_plab = false;
  HeapWord* copy = nullptr;

  markWord mark = p->mark();
  if (ShenandoahForwarding::is_forwarded(mark)) {
    return ShenandoahForwarding::get_forwardee(p);
  }
  size_t old_size = ShenandoahForwarding::size(p);
  size_t size = p->copy_size(old_size, mark);

  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      switch (target_gen) {
        case YOUNG_GENERATION: {
          copy = allocate_from_gclab(thread, size);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve. Try resetting
            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
            copy = allocate_from_gclab(thread, size);
            // If we still get nullptr, we'll try a shared allocation below.
          }
          break;
        }
        case OLD_GENERATION: {
          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
          if (plab != nullptr) {
            has_plab = true;
            copy = allocate_from_plab(thread, size, is_promotion);
            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
                ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
              // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the
              // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
              // Shrinking the desired PLAB size may allow us to eke out a small PLAB while staying beneath evacuation reserve.
              if (plab->words_remaining() < plab_min_size()) {
                ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
                copy = allocate_from_plab(thread, size, is_promotion);
                // If we still get nullptr, we'll try a shared allocation below.
                if (copy == nullptr) {
                  // If retry fails, don't continue to retry until we have success (probably in next GC pass).
                  ShenandoahThreadLocalData::disable_plab_retries(thread);
                }
              }
              // Else, copy still equals nullptr. This causes shared allocation below, preserving this plab for future needs.
            }
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
      // costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
      // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    if (target_gen == OLD_GENERATION) {
      if (from_region->is_young()) {
        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
        old_generation()->handle_failed_promotion(thread, size);
        return nullptr;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        old_generation()->handle_failed_evacuation();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);

    oom_evac_handler()->handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  if (ShenandoahEvacTracking) {
    evac_tracker()->begin_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
  oop copy_val = cast_to_oop(copy);

  // Update the age of the evacuated object
  if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!

    // This is necessary for virtual thread support. This uses the mark word without
    // considering that it may now be a forwarding pointer (and could therefore crash).
    // Secondarily, we do not want to spend cycles relativizing stack chunks for oops
    // that lost the evacuation race (and will therefore not become visible). It is
    // safe to do this on the public copy (this is also done during concurrent mark).
    copy_val->initialize_hash_if_necessary(p);
    ContinuationGCSupport::relativize_stack_chunk(copy_val);

    if (ShenandoahEvacTracking) {
      // Record that the evacuation succeeded
      evac_tracker()->end_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
    }

    if (target_gen == OLD_GENERATION) {
      old_generation()->handle_evacuation(copy, size, from_region->is_young());
    } else {
      // When copying to the old generation above, we don't care
      // about recording object age in the census stats.
      assert(target_gen == YOUNG_GENERATION, "Error");
    }
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      switch (target_gen) {
        case YOUNG_GENERATION: {
          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
          break;
        }
        case OLD_GENERATION: {
          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
          if (is_promotion) {
            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}
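
// A summary of the allocation fallback order in try_evacuate_object() above:
//   1. GCLAB (young targets) or PLAB (old targets), per thread-local sizing;
//   2. one retry from the LAB after shrinking the desired LAB size to the
//      minimum, to avoid cascading shared allocations;
//   3. a shared allocation, except for small promotions
//      (is_promotion && has_plab && size <= PLAB::min_size()), which are
//      deferred to a future cycle rather than paying the shared-alloc cost;
//   4. on total failure, a failed promotion returns nullptr so the caller
//      evacuates to young instead, while other failures notify the control
//      thread and enter the OOM-during-evacuation protocol.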

inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  HeapWord* obj;

  if (plab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
    // No PLABs in this thread, fallback to shared allocation
    return nullptr;
  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
    return nullptr;
  }
  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
  obj = plab->allocate(size);
  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
    obj = allocate_from_plab_slow(thread, size, is_promotion);
  }
  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
  if (obj == nullptr) {
    return nullptr;
  }

  if (is_promotion) {
    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
  }
  return obj;
}

// Establish a new PLAB and allocate size HeapWords within it.
HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
  assert(mode()->is_generational(), "PLABs only relevant to generational GC");

  const size_t plab_min_size = this->plab_min_size();
  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
  // allocations in other PLABs.
  const size_t min_size = (size > plab_min_size) ? align_up(size, CardTable::card_size_in_words()) : plab_min_size;

  // Figure out size of new PLAB, using value determined at last refill.
  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
  if (cur_size == 0) {
    cur_size = plab_min_size;
  }

  // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
  const size_t future_size = MIN2(cur_size * 2, plab_max_size());
  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
  // are card multiples.)
  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu"
         ", card_size: %u, cur_size: %zu, max: %zu",
         future_size, CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them. Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
  log_debug(gc, plab)("Set next PLAB refill size: %zu bytes", future_size * HeapWordSize);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);

  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
    log_debug(gc, plab)("Current PLAB size (%zu) is too small for %zu", cur_size * HeapWordSize, size * HeapWordSize);
    return nullptr;
  }

  // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  if (plab->words_remaining() < plab_min_size) {
    // Retire current PLAB. This takes care of any PLAB book-keeping.
    // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
    // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
    retire_plab(plab, thread);

    size_t actual_size = 0;
    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
    if (plab_buf == nullptr) {
      if (min_size == plab_min_size) {
        // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
        // to fail faster on subsequent promotion attempts.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
      }
      return nullptr;
    } else {
      ShenandoahThreadLocalData::enable_plab_retries(thread);
    }
    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
    if (ZeroTLAB) {
      // ... and clear it.
      Copy::zero_to_words(plab_buf, actual_size);
    } else {
      // ...and zap just allocated object.
#ifdef ASSERT
      // Skip mangling the space corresponding to the object header to
      // ensure that the returned space is not considered parsable by
      // any concurrent GC thread.
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
    }
    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
    plab->set_buf(plab_buf, actual_size);
    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
      return nullptr;
    }
    return plab->allocate(size);
  } else {
    // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble
    // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
    return nullptr;
  }
}
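
// PLAB growth illustration for allocate_from_plab_slow() above (hypothetical
// sizes): starting from a plab_min_size() of, say, 4K words, successive
// refills request 8K, 16K, 32K, ... words, doubling per refill and capping
// at plab_max_size(). Because both bounds are card multiples and doubling
// preserves card alignment, every requested size stays card-aligned.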

HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
  // Align requested sizes to card-sized multiples. Align down so that we don't violate max size of TLAB.
  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
  assert(word_size >= min_size, "Requested PLAB is too small");

  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
  // if we are at risk of infringing on the old-gen evacuation budget.
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
  return res;
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
  // We don't enforce limits on plab evacuations. We let it consume all available old-gen memory in order to reduce
  // probability of an evacuation failure. We do enforce limits on promotion, to make sure that excessive promotion
  // does not result in an old-gen evacuation failure. Note that a failed promotion is relatively harmless. Any
  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.

  // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
  // promotions. Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
  //  1. Some of the plab may have been dedicated to evacuations.
  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
  size_t not_promoted =
    ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
  ShenandoahThreadLocalData::reset_plab_promoted(thread);
  ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
  if (not_promoted > 0) {
    log_debug(gc, plab)("Retire PLAB, unexpend unpromoted: %zu", not_promoted * HeapWordSize);
    old_generation()->unexpend_promoted(not_promoted);
  }
  const size_t original_waste = plab->waste();
  HeapWord* const top = plab->top();

  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
  // It adds the size of this unused memory, in words, to plab->waste().
  plab->retire();
  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
    // safely walk the region backing the plab.
    log_debug(gc, plab)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT,
                        (plab->waste() - original_waste) * HeapWordSize, p2i(top));
    // No lock is necessary because the PLAB memory is aligned on card boundaries.
    old_generation()->card_scan()->register_object_without_lock(top);
  }
}
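
// Accounting illustration for retire_plab() above (hypothetical numbers): a
// thread retires a 16K-word PLAB in which 10K words were promotions, 5K words
// were old-gen evacuations, and 1K words were left unused. The 6K words that
// were not promotions (16K - 10K) are unexpended from the promotion budget,
// and the 1K-word filler created by plab->retire() is registered with the
// card scanner so remembered set scans can parse the region.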

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
  Thread* thread = Thread::current();
  retire_plab(plab, thread);
}

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
//
// xfer_limit is the maximum we're able to transfer from young to old based on either:
//  1. an assumption that we will be able to replenish memory "borrowed" from young at the end of collection, or
//  2. there is sufficient excess in the allocation runway during GC idle cycles
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {
  // We can limit the old reserve to the size of anticipated promotions:
  // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //             SOEP/100 = OE/TE
  //                      = OE/(OE+YE)
  //   => SOEP/(100-SOEP) = OE/((OE+YE)-OE)   // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                      = OE/YE
  //   => OE = YE*SOEP/(100-SOEP)

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation()->available();
  // The free set will reserve this amount of memory to hold young evacuations
  const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

  // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.
  const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ? bound_on_old_reserve :
      MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
           bound_on_old_reserve);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // Decide how much old space we should reserve for a mixed collection
  double reserve_for_mixed = 0;
  if (old_generation()->has_unprocessed_collection_candidates()) {
    // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
    const double max_evac_need =
      (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
    assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must be less than total available");
    const double old_fragmented_available =
      double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
    reserve_for_mixed = max_evac_need + old_fragmented_available;
    if (reserve_for_mixed > max_old_reserve) {
      reserve_for_mixed = max_old_reserve;
    }
  }

  // Decide how much space we should reserve for promotions from young
  size_t reserve_for_promo = 0;
  const size_t promo_load = old_generation()->get_promotion_potential();
  const bool doing_promotions = promo_load > 0;
  if (doing_promotions) {
    // We're promoting and have a bound on the maximum amount that can be promoted
    assert(max_old_reserve >= reserve_for_mixed, "Sanity");
    const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
    reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
  }

  // This is the total old we want to ideally reserve
  const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
  assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");

  // We now check if the old generation is running a surplus or a deficit.
  const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
  if (max_old_available >= old_reserve) {
    // We are running a surplus, so the old region surplus can go to young
    const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
    const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
    const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
    old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
  } else {
    // We are running a deficit which we'd like to fill from young.
    // Ignore that this will directly impact young_generation()->max_capacity(),
    // indirectly impacting young_reserve and old_reserve. These computations are conservative.
    // Note that deficit is rounded up by one region.
    const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
    const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;

    // Round down the regions we can transfer from young to old. If we're running short
    // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
    // curtailed if the budget is restricted.
    const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
  }
}
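
// Worked example of the reserve algebra above (hypothetical values): with
// ShenandoahOldEvacRatioPercent (SOEP) = 75 and a young reserve YE of 400M,
// OE = YE * SOEP / (100 - SOEP) = 400M * 75 / 25 = 1200M, clamped by
// bound_on_old_reserve = old_available + old_xfer_limit + young_reserve.
// If mixed evacuations and promotions only need 800M of that, the difference
// shows up below as an old region surplus that is transferred back to young.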

void ShenandoahGenerationalHeap::reset_generation_reserves() {
  ShenandoahHeapLocker locker(lock());
  young_generation()->set_evacuation_reserve(0);
  old_generation()->set_evacuation_reserve(0);
  old_generation()->set_promoted_reserve(0);
}

void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* const young_gen = heap->young_generation();
  ShenandoahOldGeneration* const old_gen = heap->old_generation();
  const size_t young_available = young_gen->available();
  const size_t old_available = old_gen->available();
  ss->print_cr("After %s, %s %zu regions to %s to prepare for next gc, old available: "
               PROPERFMT ", young_available: " PROPERFMT,
               when,
               success ? "successfully transferred" : "failed to transfer", region_count, region_destination,
               PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
}

void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
  class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
  private:
    ShenandoahPhaseTimings::Phase _phase;
    ShenandoahRegionIterator _regions;
  public:
    explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
      WorkerTask("Shenandoah Global Coalesce"),
      _phase(phase) {}

    void work(uint worker_id) override {
      ShenandoahWorkerTimingsTracker timer(_phase,
                                           ShenandoahPhaseTimings::ScanClusters,
                                           worker_id, true);
      ShenandoahHeapRegion* region;
      while ((region = _regions.next()) != nullptr) {
        // old region is not in the collection set and was not immediately trashed
        if (region->is_old() && region->is_active() && !region->is_humongous()) {
          // Reset the coalesce and fill boundary because this is a global collect
          // and cannot be preempted by young collects. We want to be sure the entire
          // region is coalesced here and does not resume from a previously interrupted
          // or completed coalescing.
          region->begin_preemptible_coalesce_and_fill();
          region->oop_coalesce_and_fill(false);
        }
      }
    }
  };

  ShenandoahPhaseTimings::Phase phase = concurrent ?
      ShenandoahPhaseTimings::conc_coalesce_and_fill :
      ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;

  // This is not cancellable
  ShenandoahGlobalCoalesceAndFill coalesce(phase);
  workers()->run_task(&coalesce);
  old_generation()->set_parsable(true);
}

template<bool CONCURRENT>
class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
private:
  // For update refs, _generation will be young or global. Mixed collections use the young generation.
  ShenandoahGeneration* _generation;
  ShenandoahGenerationalHeap* _heap;
  ShenandoahRegionIterator* _regions;
  ShenandoahRegionChunkIterator* _work_chunks;

public:
  ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahGeneration* generation,
                                           ShenandoahRegionIterator* regions,
                                           ShenandoahRegionChunkIterator* work_chunks) :
    WorkerTask("Shenandoah Update References"),
    _generation(generation),
    _heap(ShenandoahGenerationalHeap::heap()),
    _regions(regions),
    _work_chunks(work_chunks)
  {
    const bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
    log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
  }

  void work(uint worker_id) override {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;

    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
      // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
      // next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set

    ShenandoahHeapRegion* r = _regions->next();
    // We update references for global, mixed, and young collections.
    assert(_generation->is_mark_complete(), "Expected complete marking");
    ShenandoahMarkingContext* const ctx = _heap->marking_context();
    bool is_mixed = _heap->collection_set()->has_old_regions();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");

      log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region %zu", worker_id, r->index());
      if (r->is_active() && !r->is_cset()) {
        if (r->is_young()) {
          _heap->marked_object_oop_iterate(r, &cl, update_watermark);
        } else if (r->is_old()) {
          if (_generation->is_global()) {
            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
          }
          // Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
        } else {
          // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
          // to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
          // active status may propagate at a different speed than the changing of the region's affiliation.

          // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
          // by this thread before the region's affiliation() is seen by this thread.

          // It's ok for this race to occur because the newly transformed region does not have any references to be
          // updated.

          assert(r->get_update_watermark() == r->bottom(),
                 "%s Region %zu is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
                 r->affiliation_name(), r->index());
        }
      }

      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }

      r = _regions->next();
    }

    if (_generation->is_young()) {
      // Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
      // set processing if not in generational mode or if GLOBAL mode.

      // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
      // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
      // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
      update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
    }
  }

  template<class T>
  void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {
    struct ShenandoahRegionChunk assignment;
    ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();

    while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
      // Keep grabbing next work chunk to process until finished, or asked to yield
      ShenandoahHeapRegion* r = assignment._r;
      if (r->is_active() && !r->is_cset() && r->is_old()) {
        HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
        HeapWord* end_of_range = r->get_update_watermark();
        if (end_of_range > start_of_range + assignment._chunk_size) {
          end_of_range = start_of_range + assignment._chunk_size;
        }

        if (start_of_range >= end_of_range) {
          continue;
        }

        // Old region in a young cycle or mixed cycle.
        if (is_mixed) {
          if (r->is_humongous()) {
            // Need to examine both dirty and clean cards during mixed evac.
            r->oop_iterate_humongous_slice_all(&cl, start_of_range, assignment._chunk_size);
          } else {
            // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
            // and filled. This will use mark bits to find objects that need to be updated.
            update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
          }
        } else {
          // This is a young evacuation
          size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
          size_t clusters = assignment._chunk_size / cluster_size;
          assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
          scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
        }
      }
    }
  }

  template<class T>
  void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
                                       const ShenandoahHeapRegion* r, HeapWord* start_of_range,
                                       HeapWord* end_of_range) const {
    // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
    ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());

    // Any object that begins in a previous range is part of a different scanning assignment. Any object that
    // starts after end_of_range is also not my responsibility. (Either allocated during evacuation, so does
    // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)

    // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
    // when no live object is found in the range.
    HeapWord* tams = ctx->top_at_mark_start(r);
    HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);

    while (p < end_of_range) {
      // p is known to point to the beginning of marked object obj
      oop obj = cast_to_oop(p);
      objs.do_object(obj);
      HeapWord* prev_p = p;
      p += obj->size();
      if (p < tams) {
        p = ctx->get_next_marked_addr(p, tams);
        // If there are no more marked objects before tams, this returns tams. Note that tams is
        // either >= end_of_range, or tams is the start of an object that is marked.
      }
      assert(p != prev_p, "Lack of forward progress");
    }
  }

  HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
                                        HeapWord* start_of_range, HeapWord* end_of_range) const {
    HeapWord* p = start_of_range;

    if (p >= tams) {
      // We cannot use ctx->is_marked(obj) to test whether an object begins at this address. Instead,
      // we need to use the remembered set crossing map to advance p to the first object that starts
      // within the enclosing card.
      size_t card_index = scanner->card_index_for_addr(start_of_range);
      while (true) {
        HeapWord* first_object = scanner->first_object_in_card(card_index);
        if (first_object != nullptr) {
          p = first_object;
          break;
        } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
          card_index++;
        } else {
          // Signal that no object was found in range
          p = end_of_range;
          break;
        }
      }
    } else if (!ctx->is_marked(cast_to_oop(p))) {
      p = ctx->get_next_marked_addr(p, tams);
      // If there are no more marked objects before tams, this returns tams.
      // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
    }
    return p;
  }
};
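
// Card-walk illustration for get_first_object_start_word() above: when the
// chunk begins at or above TAMS, the mark bitmap cannot be consulted, so the
// loop uses the crossing map. Suppose (hypothetically) the chunk's first card
// records no object start: the loop advances card by card until some card's
// first_object_in_card() is non-null (scanning resumes at that object) or the
// next card begins at or beyond end_of_range (p becomes end_of_range, meaning
// no object starts in this chunk).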

void ShenandoahGenerationalHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  const uint nworkers = workers()->active_workers();
  ShenandoahRegionChunkIterator work_list(nworkers);
  if (concurrent) {
    ShenandoahGenerationalUpdateHeapRefsTask<true> task(generation, &_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  } else {
    ShenandoahGenerationalUpdateHeapRefsTask<false> task(generation, &_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  }

  if (ShenandoahEnableCardStats) {
    // Only do this if we are collecting card stats
    ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
    assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
    card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
  }
}

struct ShenandoahCompositeRegionClosure {
  template<typename C1, typename C2>
  class Closure : public ShenandoahHeapRegionClosure {
  private:
    C1 &_c1;
    C2 &_c2;

  public:
    Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}

    void heap_region_do(ShenandoahHeapRegion* r) override {
      _c1.heap_region_do(r);
      _c2.heap_region_do(r);
    }

    bool is_thread_safe() override {
      return _c1.is_thread_safe() && _c2.is_thread_safe();
    }
  };

  template<typename C1, typename C2>
  static Closure<C1, C2> of(C1 &c1, C2 &c2) {
    return Closure<C1, C2>(c1, c2);
  }
};
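
// Usage sketch: this is how final_update_refs_update_region_states() below
// fuses two closures into a single pass over the regions:
//
//   ShenandoahSynchronizePinnedRegionStates pins;
//   ShenandoahUpdateRegionAges ages(marking_context());
//   auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
//   parallel_heap_region_iterate(&cl);
//
// Note that of() returns the Closure by value, so the composite must be used
// within the lifetime of the two wrapped closures.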

class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* _ctx;

public:
  explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    // Maintenance of region age must follow evacuation in order to account for
    // evacuation allocations within survivor regions. We consult region age during
    // the subsequent evacuation to determine whether certain objects need to
    // be promoted.
    if (r->is_young() && r->is_active()) {
      HeapWord* tams = _ctx->top_at_mark_start(r);
      HeapWord* top = r->top();

      // Allocations move the watermark when top moves. However, compacting
      // objects will sometimes lower top beneath the watermark, after which,
      // attempts to read the watermark will assert out (watermark should not be
      // higher than top).
      if (top > tams) {
        // There have been allocations in this region since the start of the cycle.
        // Any objects new to this region must not assimilate elevated age.
        r->reset_age();
      } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
        r->increment_age();
      }
    }
  }

  bool is_thread_safe() override {
    return true;
  }
};
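
// Aging illustration for ShenandoahUpdateRegionAges (hypothetical numbers): a
// young region whose top still equals its TAMS received no new allocations
// this cycle, so on an aging cycle its age increments (say, 3 to 4). A region
// that allocated past TAMS is reset to age 0 so the newly allocated objects
// do not inherit the region's elevated age.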

void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates pins;
  ShenandoahUpdateRegionAges ages(marking_context());
  auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
  shenandoah_assert_heaplocked_or_safepoint();
  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
  // transient state. Otherwise, these actions have no effect.
  reset_generation_reserves();

  if (!old_generation()->is_parsable()) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
    coalesce_and_fill_old_regions(false);
  }
}

void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
  if (!old_generation()->is_parsable()) {
    // Class unloading may render the card offsets unusable, so we must rebuild them before
    // the next remembered set scan. We _could_ let the control thread do this sometime after
    // the global cycle has completed and before the next young collection, but under memory
    // pressure the control thread may not have the time (that is, because it's running back
    // to back GCs). In that scenario, we would have to make the old regions parsable before
    // we could start a young collection. This could delay the start of the young cycle and
    // throw off the heuristics.
    entry_global_coalesce_and_fill();
  }
  reset_generation_reserves();
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
  const char* msg = "Coalescing and filling old regions";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  coalesce_and_fill_old_regions(true);
}

void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
  ShenandoahUpdateRegionAges cl(ctx);
  parallel_heap_region_iterate(&cl);
}