1 /*
2 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
3 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
27 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
29 #include "gc/shenandoah/shenandoahFreeSet.hpp"
30 #include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
31 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
32 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
33 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
34 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
35 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
36 #include "gc/shenandoah/shenandoahInitLogger.hpp"
37 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
38 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
39 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
41 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
42 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
43 #include "gc/shenandoah/shenandoahUtils.hpp"
44 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
45 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
46 #include "logging/log.hpp"
47 #include "utilities/events.hpp"
48
49
50 class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
51 public:
52 static void print() {
53 ShenandoahGenerationalInitLogger logger;
54 logger.print_all();
55 }
56 protected:
57 void print_gc_specific() override {
58 ShenandoahInitLogger::print_gc_specific();
59
60 ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
61 log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
62 log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
63 }
64 };
65
66 size_t ShenandoahGenerationalHeap::calculate_min_plab() {
67 return align_up(PLAB::min_size(), CardTable::card_size_in_words());
68 }
69
70 size_t ShenandoahGenerationalHeap::calculate_max_plab() {
71 size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
72 return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
73 }
74
75 // Returns size in bytes
76 size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
77 return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
78 }
79
80 ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
81 ShenandoahHeap(policy),
82 _age_census(nullptr),
83 _evac_tracker(new ShenandoahEvacuationTracker()),
84 _min_plab_size(calculate_min_plab()),
85 _max_plab_size(calculate_max_plab()),
86 _regulator_thread(nullptr),
87 _young_gen_memory_pool(nullptr),
88 _old_gen_memory_pool(nullptr) {
89 assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
90 assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
91 }
92
93 void ShenandoahGenerationalHeap::post_initialize() {
94 ShenandoahHeap::post_initialize();
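  // The age census drives adaptive tenuring: evacuate_object() consults its
  // tenuring_threshold() when deciding whether an aged object should be promoted.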
95 _age_census = new ShenandoahAgeCensus();
96 }
97
98 void ShenandoahGenerationalHeap::print_init_logger() const {
99 ShenandoahGenerationalInitLogger logger;
100 logger.print_all();
101 }
102
103 void ShenandoahGenerationalHeap::print_tracing_info() const {
104 ShenandoahHeap::print_tracing_info();
105
106 LogTarget(Info, gc, stats) lt;
107 if (lt.is_enabled()) {
108 LogStream ls(lt);
109 ls.cr();
110 ls.cr();
111 evac_tracker()->print_global_on(&ls);
112 }
113 }
114
115 void ShenandoahGenerationalHeap::initialize_heuristics() {
116 // Initialize global generation and heuristics even in generational mode.
117 ShenandoahHeap::initialize_heuristics();
118
119 // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
120 // for old would be total heap - minimum capacity of young. This means the sum of the maximum
121 // allowed for old and young could exceed the total heap size. It remains the case that the
122 // _actual_ capacity of young + old = total.
123 _generation_sizer.heap_size_changed(max_capacity());
124 size_t initial_capacity_young = _generation_sizer.max_young_size();
125 size_t max_capacity_young = _generation_sizer.max_young_size();
126 size_t initial_capacity_old = max_capacity() - max_capacity_young;
127 size_t max_capacity_old = max_capacity() - initial_capacity_young;
128
129 _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young);
130 _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old);
131 _young_generation->initialize_heuristics(mode());
132 _old_generation->initialize_heuristics(mode());
133 }
134
135 void ShenandoahGenerationalHeap::initialize_serviceability() {
136 assert(mode()->is_generational(), "Only for the generational mode");
137 _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
138 _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
139 cycle_memory_manager()->add_pool(_young_gen_memory_pool);
140 cycle_memory_manager()->add_pool(_old_gen_memory_pool);
141 stw_memory_manager()->add_pool(_young_gen_memory_pool);
142 stw_memory_manager()->add_pool(_old_gen_memory_pool);
143 }
144
145 GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
146 assert(mode()->is_generational(), "Only for the generational mode");
147 GrowableArray<MemoryPool*> memory_pools(2);
148 memory_pools.append(_young_gen_memory_pool);
149 memory_pools.append(_old_gen_memory_pool);
150 return memory_pools;
151 }
152
153 void ShenandoahGenerationalHeap::initialize_controller() {
154 auto control_thread = new ShenandoahGenerationalControlThread();
155 _control_thread = control_thread;
156 _regulator_thread = new ShenandoahRegulatorThread(control_thread);
157 }
158
159 void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
160 if (!shenandoah_policy()->is_at_shutdown()) {
161 ShenandoahHeap::gc_threads_do(tcl);
162 tcl->do_thread(regulator_thread());
163 }
164 }
165
166 void ShenandoahGenerationalHeap::stop() {
167 ShenandoahHeap::stop();
168 regulator_thread()->stop();
169 }
170
171 bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
172 if (is_idle()) {
173 return false;
174 }
175
176 if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
177 // We are marking young, this object is in young, and it is below the TAMS
178 return true;
179 }
180
181 if (is_in_old(obj)) {
182 // Card marking barriers are required for objects in the old generation
183 return true;
184 }
185
186 if (has_forwarded_objects()) {
187 // Object may have pointers that need to be updated
188 return true;
189 }
190
191 return false;
192 }
193
194 void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
195 ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);
197 workers()->run_task(&task);
198 }
199
200 void ShenandoahGenerationalHeap::promote_regions_in_place(bool concurrent) {
201 ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, true /* only promote regions */);
203 workers()->run_task(&task);
204 }
205
206 oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
207 assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
208 if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
209 // This thread went through the OOM during evac protocol and it is safe to return
210 // the forward pointer. It must not attempt to evacuate anymore.
211 return ShenandoahBarrierSet::resolve_forwarded(p);
212 }
213
214 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
215
216 ShenandoahHeapRegion* r = heap_region_containing(p);
217 assert(!r->is_humongous(), "never evacuate humongous objects");
218
219 ShenandoahAffiliation target_gen = r->affiliation();
220 // gc_generation() can change asynchronously and should not be used here.
221 assert(active_generation() != nullptr, "Error");
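  // Only objects evacuated from young regions during a young collection are candidates
  // for promotion. The effective age used below is the region's age plus the object's
  // age; if it reaches the tenuring threshold, we first attempt to copy into old.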
222 if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
223 markWord mark = p->mark();
224 if (mark.is_marked()) {
225 // Already forwarded.
226 return ShenandoahBarrierSet::resolve_forwarded(p);
227 }
228
229 if (mark.has_displaced_mark_helper()) {
230 // We don't want to deal with MT here just to ensure we read the right mark word.
231 // Skip the potential promotion attempt for this one.
232 } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
233 oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
234 if (result != nullptr) {
235 return result;
236 }
237 // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
238 }
239 }
240 return try_evacuate_object(p, thread, r, target_gen);
241 }
242
243 // try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
244 // to OLD_GENERATION.
245 oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
246 ShenandoahAffiliation target_gen) {
247 bool alloc_from_lab = true;
248 bool has_plab = false;
249 HeapWord* copy = nullptr;
250 size_t size = ShenandoahForwarding::size(p);
251 bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();
252
253 #ifdef ASSERT
254 if (ShenandoahOOMDuringEvacALot &&
255 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
256 copy = nullptr;
257 } else {
258 #endif
259 if (UseTLAB) {
260 switch (target_gen) {
261 case YOUNG_GENERATION: {
262 copy = allocate_from_gclab(thread, size);
263 if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
264 // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve. Try resetting
265 // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
266 ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
267 copy = allocate_from_gclab(thread, size);
268 // If we still get nullptr, we'll try a shared allocation below.
269 }
270 break;
271 }
272 case OLD_GENERATION: {
273 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
274 if (plab != nullptr) {
275 has_plab = true;
276 copy = allocate_from_plab(thread, size, is_promotion);
277 if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
278 ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
279 // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
280 // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
281 // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the
282 // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
283 if (plab->words_remaining() < plab_min_size()) {
284 ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
285 copy = allocate_from_plab(thread, size, is_promotion);
286 // If we still get nullptr, we'll try a shared allocation below.
287 if (copy == nullptr) {
288 // If retry fails, don't continue to retry until we have success (probably in next GC pass)
289 ShenandoahThreadLocalData::disable_plab_retries(thread);
290 }
291 }
              // else, copy still equals nullptr. This causes a shared allocation below, preserving this plab for future needs.
293 }
294 }
295 break;
296 }
297 default: {
298 ShouldNotReachHere();
299 break;
300 }
301 }
302 }
303
304 if (copy == nullptr) {
305 // If we failed to allocate in LAB, we'll try a shared allocation.
306 if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
307 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
308 copy = allocate_memory(req);
309 alloc_from_lab = false;
310 }
311 // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
312 // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
313 // costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
314 // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
315 }
316 #ifdef ASSERT
317 }
318 #endif
319
320 if (copy == nullptr) {
321 if (target_gen == OLD_GENERATION) {
322 if (from_region->is_young()) {
323 // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
324 old_generation()->handle_failed_promotion(thread, size);
325 return nullptr;
326 } else {
327 // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
328 // after the evacuation threads have finished.
329 old_generation()->handle_failed_evacuation();
330 }
331 }
332
333 control_thread()->handle_alloc_failure_evac(size);
334
335 oom_evac_handler()->handle_out_of_memory_during_evacuation();
336
337 return ShenandoahBarrierSet::resolve_forwarded(p);
338 }
339
340 // Copy the object:
341 NOT_PRODUCT(evac_tracker()->begin_evacuation(thread, size * HeapWordSize));
342 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
343 oop copy_val = cast_to_oop(copy);
344
345 // Update the age of the evacuated object
346 if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
347 ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
348 }
349
350 // Try to install the new forwarding pointer.
351 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
352 if (result == copy_val) {
353 // Successfully evacuated. Our copy is now the public one!
354
355 // This is necessary for virtual thread support. This uses the mark word without
356 // considering that it may now be a forwarding pointer (and could therefore crash).
357 // Secondarily, we do not want to spend cycles relativizing stack chunks for oops
358 // that lost the evacuation race (and will therefore not become visible). It is
359 // safe to do this on the public copy (this is also done during concurrent mark).
360 ContinuationGCSupport::relativize_stack_chunk(copy_val);
361
362 // Record that the evacuation succeeded
363 NOT_PRODUCT(evac_tracker()->end_evacuation(thread, size * HeapWordSize));
364
365 if (target_gen == OLD_GENERATION) {
366 old_generation()->handle_evacuation(copy, size, from_region->is_young());
367 } else {
368 // When copying to the old generation above, we don't care
369 // about recording object age in the census stats.
370 assert(target_gen == YOUNG_GENERATION, "Error");
371 // We record this census only when simulating pre-adaptive tenuring behavior, or
372 // when we have been asked to record the census at evacuation rather than at mark
373 if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
374 evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
375 }
376 }
377 shenandoah_assert_correct(nullptr, copy_val);
378 return copy_val;
379 } else {
380 // Failed to evacuate. We need to deal with the object that is left behind. Since this
381 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
382 // But if it happens to contain references to evacuated regions, those references would
383 // not get updated for this stale copy during this cycle, and we will crash while scanning
384 // it the next cycle.
385 if (alloc_from_lab) {
386 // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
387 // object will overwrite this stale copy, or the filler object on LAB retirement will
388 // do this.
389 switch (target_gen) {
390 case YOUNG_GENERATION: {
391 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
392 break;
393 }
394 case OLD_GENERATION: {
395 ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
396 if (is_promotion) {
397 ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
398 }
399 break;
400 }
401 default: {
402 ShouldNotReachHere();
403 break;
404 }
405 }
406 } else {
407 // For non-LAB allocations, we have no way to retract the allocation, and
408 // have to explicitly overwrite the copy with the filler object. With that overwrite,
409 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
410 assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
411 fill_with_object(copy, size);
412 shenandoah_assert_correct(nullptr, copy_val);
413 // For non-LAB allocations, the object has already been registered
414 }
415 shenandoah_assert_correct(nullptr, result);
416 return result;
417 }
418 }
419
420 inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
421 assert(UseTLAB, "TLABs should be enabled");
422
423 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
424 HeapWord* obj;
425
426 if (plab == nullptr) {
427 assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
428 // No PLABs in this thread, fallback to shared allocation
429 return nullptr;
430 } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
431 return nullptr;
432 }
433 // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
434 obj = plab->allocate(size);
435 if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
436 // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
437 obj = allocate_from_plab_slow(thread, size, is_promotion);
438 }
439 // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
440 if (obj == nullptr) {
441 return nullptr;
442 }
443
444 if (is_promotion) {
445 ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
446 }
447 return obj;
448 }
449
450 // Establish a new PLAB and allocate size HeapWords within it.
451 HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
452 // New object should fit the PLAB size
453
454 assert(mode()->is_generational(), "PLABs only relevant to generational GC");
455 const size_t plab_min_size = this->plab_min_size();
456 // PLABs are aligned to card boundaries to avoid synchronization with concurrent
457 // allocations in other PLABs.
458 const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;
459
460 // Figure out size of new PLAB, using value determined at last refill.
461 size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
462 if (cur_size == 0) {
463 cur_size = plab_min_size;
464 }
465
466 // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
467 size_t future_size = MIN2(cur_size * 2, plab_max_size());
468 // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
469 // are card multiples.)
470 assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu"
471 ", card_size: %zu, cur_size: %zu, max: %zu",
472 future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());
473
474 // Record new heuristic value even if we take any shortcut. This captures
475 // the case when moderately-sized objects always take a shortcut. At some point,
476 // heuristics should catch up with them. Note that the requested cur_size may
477 // not be honored, but we remember that this is the preferred size.
478 log_debug(gc, free)("Set new PLAB size: %zu", future_size);
479 ShenandoahThreadLocalData::set_plab_size(thread, future_size);
480 if (cur_size < size) {
481 // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
482 // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
483 log_debug(gc, free)("Current PLAB size (%zu) is too small for %zu", cur_size, size);
484 return nullptr;
485 }
486
487 // Retire current PLAB, and allocate a new one.
488 PLAB* plab = ShenandoahThreadLocalData::plab(thread);
489 if (plab->words_remaining() < plab_min_size) {
490 // Retire current PLAB. This takes care of any PLAB book-keeping.
491 // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
492 // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
493 retire_plab(plab, thread);
494
495 size_t actual_size = 0;
496 HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
497 if (plab_buf == nullptr) {
498 if (min_size == plab_min_size) {
499 // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
500 // to fail faster on subsequent promotion attempts.
501 ShenandoahThreadLocalData::disable_plab_promotions(thread);
502 }
503 return nullptr;
504 } else {
505 ShenandoahThreadLocalData::enable_plab_retries(thread);
506 }
507 // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
508 if (ZeroTLAB) {
509 // ... and clear it.
510 Copy::zero_to_words(plab_buf, actual_size);
511 } else {
512 // ...and zap just allocated object.
513 #ifdef ASSERT
514 // Skip mangling the space corresponding to the object header to
515 // ensure that the returned space is not considered parsable by
516 // any concurrent GC thread.
517 size_t hdr_size = oopDesc::header_size();
518 Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
519 #endif // ASSERT
520 }
521 assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
522 plab->set_buf(plab_buf, actual_size);
523 if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
524 return nullptr;
525 }
526 return plab->allocate(size);
527 } else {
528 // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble
529 // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request
530 // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we
531 // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
532 return nullptr;
533 }
534 }
535
536 HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
537 // Align requested sizes to card-sized multiples. Align down so that we don't violate max size of TLAB.
538 assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
539 assert(word_size >= min_size, "Requested PLAB is too small");
540
541 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
542 // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
543 // if we are at risk of infringing on the old-gen evacuation budget.
544 HeapWord* res = allocate_memory(req);
545 if (res != nullptr) {
546 *actual_size = req.actual_size();
547 } else {
548 *actual_size = 0;
549 }
550 assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
551 return res;
552 }
553
554 void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
555 // We don't enforce limits on plab evacuations. We let it consume all available old-gen memory in order to reduce
556 // probability of an evacuation failure. We do enforce limits on promotion, to make sure that excessive promotion
557 // does not result in an old-gen evacuation failure. Note that a failed promotion is relatively harmless. Any
558 // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
559
560 // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
561 // promotions. Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
562 // 1. Some of the plab may have been dedicated to evacuations.
563 // 2. Some of the plab may have been abandoned due to waste (at the end of the plab).
564 size_t not_promoted =
565 ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
566 ShenandoahThreadLocalData::reset_plab_promoted(thread);
567 ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
568 if (not_promoted > 0) {
569 old_generation()->unexpend_promoted(not_promoted);
570 }
571 const size_t original_waste = plab->waste();
572 HeapWord* const top = plab->top();
573
574 // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
575 // It adds the size of this unused memory, in words, to plab->waste().
576 plab->retire();
577 if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
578 // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
579 // safely walk the region backing the plab.
580 log_debug(gc)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT,
581 plab->waste() - original_waste, p2i(top));
582 // No lock is necessary because the PLAB memory is aligned on card boundaries.
583 old_generation()->card_scan()->register_object_without_lock(top);
584 }
585 }
586
587 void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
588 Thread* thread = Thread::current();
589 retire_plab(plab, thread);
590 }
591
592 ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
593 shenandoah_assert_heaplocked_or_safepoint();
594
595 ShenandoahOldGeneration* old_gen = old_generation();
596 const ssize_t old_region_balance = old_gen->get_region_balance();
597 old_gen->set_region_balance(0);
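  // A positive balance is a surplus of old regions to be transferred to young; a negative
  // balance is a deficit to be filled by transferring regions from young to old. See
  // compute_old_generation_balance(), which establishes the balance.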
598
599 if (old_region_balance > 0) {
600 const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
601 const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
602 return TransferResult {
603 success, old_region_surplus, "young"
604 };
605 }
606
607 if (old_region_balance < 0) {
608 const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
609 const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
610 if (!success) {
611 old_gen->handle_failed_transfer();
612 }
613 return TransferResult {
614 success, old_region_deficit, "old"
615 };
616 }
617
618 return TransferResult {true, 0, "none"};
619 }
620
621 // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
622 // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
623 // xfer_limit, and any surplus is transferred to the young generation.
624 // xfer_limit is the maximum we're able to transfer from young to old.
625 void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {
626
627 // We can limit the old reserve to the size of anticipated promotions:
628 // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
629 // clamped by the old generation space available.
630 //
631 // Here's the algebra.
632 // Let SOEP = ShenandoahOldEvacRatioPercent,
633 // OE = old evac,
634 // YE = young evac, and
635 // TE = total evac = OE + YE
636 // By definition:
637 // SOEP/100 = OE/TE
638 // = OE/(OE+YE)
639 // => SOEP/(100-SOEP) = OE/((OE+YE)-OE) // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
640 // = OE/YE
641 // => OE = YE*SOEP/(100-SOEP)
642
643 // We have to be careful in the event that SOEP is set to 100 by the user.
644 assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
645 const size_t old_available = old_generation()->available();
646 // The free set will reserve this amount of memory to hold young evacuations
647 const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
648
649 // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.
650
651 const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
652 const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)?
653 bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
654 bound_on_old_reserve);
655
656 const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
657
658 // Decide how much old space we should reserve for a mixed collection
659 double reserve_for_mixed = 0;
660 if (old_generation()->has_unprocessed_collection_candidates()) {
661 // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
662 // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
663 const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
    assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must not exceed total available");
666 const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
667 reserve_for_mixed = max_evac_need + old_fragmented_available;
668 if (reserve_for_mixed > max_old_reserve) {
669 reserve_for_mixed = max_old_reserve;
670 }
671 }
672
673 // Decide how much space we should reserve for promotions from young
674 size_t reserve_for_promo = 0;
675 const size_t promo_load = old_generation()->get_promotion_potential();
676 const bool doing_promotions = promo_load > 0;
677 if (doing_promotions) {
678 // We're promoting and have a bound on the maximum amount that can be promoted
679 assert(max_old_reserve >= reserve_for_mixed, "Sanity");
680 const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
681 reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
682 }
683
684 // This is the total old we want to ideally reserve
685 const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
686 assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
687
688 // We now check if the old generation is running a surplus or a deficit.
689 const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
690 if (max_old_available >= old_reserve) {
691 // We are running a surplus, so the old region surplus can go to young
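    // Integer division rounds the surplus down to whole regions, and the transfer is
    // further capped at the number of unaffiliated (free) old regions.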
692 const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
693 const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
694 const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
695 old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
696 } else {
697 // We are running a deficit which we'd like to fill from young.
698 // Ignore that this will directly impact young_generation()->max_capacity(),
699 // indirectly impacting young_reserve and old_reserve. These computations are conservative.
    // Note that the deficit is rounded up to a whole number of regions.
701 const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
702 const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;
703
704 // Round down the regions we can transfer from young to old. If we're running short
705 // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
706 // curtailed if the budget is restricted.
707 const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
708 old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
709 }
710 }
711
712 void ShenandoahGenerationalHeap::reset_generation_reserves() {
713 young_generation()->set_evacuation_reserve(0);
714 old_generation()->set_evacuation_reserve(0);
715 old_generation()->set_promoted_reserve(0);
716 }
717
718 void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
719 auto heap = ShenandoahGenerationalHeap::heap();
720 ShenandoahYoungGeneration* const young_gen = heap->young_generation();
721 ShenandoahOldGeneration* const old_gen = heap->old_generation();
722 const size_t young_available = young_gen->available();
723 const size_t old_available = old_gen->available();
724 ss->print_cr("After %s, %s %zu regions to %s to prepare for next gc, old available: "
725 PROPERFMT ", young_available: " PROPERFMT,
726 when,
727 success? "successfully transferred": "failed to transfer", region_count, region_destination,
728 PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
729 }
730
731 void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
732 class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
733 private:
734 ShenandoahPhaseTimings::Phase _phase;
735 ShenandoahRegionIterator _regions;
736 public:
737 explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
738 WorkerTask("Shenandoah Global Coalesce"),
739 _phase(phase) {}
740
741 void work(uint worker_id) override {
742 ShenandoahWorkerTimingsTracker timer(_phase,
743 ShenandoahPhaseTimings::ScanClusters,
744 worker_id, true);
745 ShenandoahHeapRegion* region;
746 while ((region = _regions.next()) != nullptr) {
747 // old region is not in the collection set and was not immediately trashed
748 if (region->is_old() && region->is_active() && !region->is_humongous()) {
749 // Reset the coalesce and fill boundary because this is a global collect
750 // and cannot be preempted by young collects. We want to be sure the entire
751 // region is coalesced here and does not resume from a previously interrupted
752 // or completed coalescing.
753 region->begin_preemptible_coalesce_and_fill();
754 region->oop_coalesce_and_fill(false);
755 }
756 }
757 }
758 };
759
760 ShenandoahPhaseTimings::Phase phase = concurrent ?
761 ShenandoahPhaseTimings::conc_coalesce_and_fill :
762 ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;
763
764 // This is not cancellable
765 ShenandoahGlobalCoalesceAndFill coalesce(phase);
766 workers()->run_task(&coalesce);
767 old_generation()->set_parsable(true);
768 }
769
770 template<bool CONCURRENT>
771 class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
772 private:
773 ShenandoahGenerationalHeap* _heap;
774 ShenandoahRegionIterator* _regions;
775 ShenandoahRegionChunkIterator* _work_chunks;
776
777 public:
778 explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
779 ShenandoahRegionChunkIterator* work_chunks) :
780 WorkerTask("Shenandoah Update References"),
781 _heap(ShenandoahGenerationalHeap::heap()),
782 _regions(regions),
783 _work_chunks(work_chunks)
784 {
785 bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
786 log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
787 }
788
789 void work(uint worker_id) {
790 if (CONCURRENT) {
791 ShenandoahConcurrentWorkerSession worker_session(worker_id);
792 ShenandoahSuspendibleThreadSetJoiner stsj;
793 do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
794 } else {
795 ShenandoahParallelWorkerSession worker_session(worker_id);
796 do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
797 }
798 }
799
800 private:
801 template<class T>
802 void do_work(uint worker_id) {
803 T cl;
804
805 if (CONCURRENT && (worker_id == 0)) {
806 // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
807 // results of evacuation. These reserves are no longer necessary because evacuation has completed.
808 size_t cset_regions = _heap->collection_set()->count();
809
810 // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
811 // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
812 // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
813 // next GC cycle.
814 _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
815 }
816 // If !CONCURRENT, there's no value in expanding Mutator free set
817
818 ShenandoahHeapRegion* r = _regions->next();
819 // We update references for global, old, and young collections.
820 ShenandoahGeneration* const gc_generation = _heap->gc_generation();
821 shenandoah_assert_generations_reconciled();
822 assert(gc_generation->is_mark_complete(), "Expected complete marking");
823 ShenandoahMarkingContext* const ctx = _heap->marking_context();
824 bool is_mixed = _heap->collection_set()->has_old_regions();
825 while (r != nullptr) {
826 HeapWord* update_watermark = r->get_update_watermark();
827 assert(update_watermark >= r->bottom(), "sanity");
828
829 log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region %zu", worker_id, r->index());
830 bool region_progress = false;
831 if (r->is_active() && !r->is_cset()) {
832 if (r->is_young()) {
833 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
834 region_progress = true;
835 } else if (r->is_old()) {
          if (gc_generation->is_global()) {
            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
839 region_progress = true;
840 }
841 // Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
842 // Don't bother to report pacing progress in this case.
843 } else {
844 // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
845 // to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
846 // active status may propagate at a different speed than the changing of the region's affiliation.
847
848 // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
849 // by this thread before the region's affiliation() is seen by this thread.
850
851 // It's ok for this race to occur because the newly transformed region does not have any references to be
852 // updated.
853
854 assert(r->get_update_watermark() == r->bottom(),
855 "%s Region %zu is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
856 r->affiliation_name(), r->index());
857 }
858 }
859
860 if (region_progress && ShenandoahPacing) {
861 _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
862 }
863
864 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
865 return;
866 }
867
868 r = _regions->next();
869 }
870
871 if (!gc_generation->is_global()) {
      // Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
      // set processing in non-generational mode or in GLOBAL cycles.
874
875 // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
876 // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
877 // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
878 update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
879 }
880 }
881
882 template<class T>
883 void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {
884
885 struct ShenandoahRegionChunk assignment;
886 ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();
887
888 while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
889 // Keep grabbing next work chunk to process until finished, or asked to yield
890 ShenandoahHeapRegion* r = assignment._r;
891 if (r->is_active() && !r->is_cset() && r->is_old()) {
892 HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
893 HeapWord* end_of_range = r->get_update_watermark();
894 if (end_of_range > start_of_range + assignment._chunk_size) {
895 end_of_range = start_of_range + assignment._chunk_size;
896 }
897
898 if (start_of_range >= end_of_range) {
899 continue;
900 }
901
902 // Old region in a young cycle or mixed cycle.
903 if (is_mixed) {
904 if (r->is_humongous()) {
905 // Need to examine both dirty and clean cards during mixed evac.
            r->oop_iterate_humongous_slice_all(&cl, start_of_range, assignment._chunk_size);
907 } else {
908 // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
909 // and filled. This will use mark bits to find objects that need to be updated.
910 update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
911 }
912 } else {
913 // This is a young evacuation
914 size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
915 size_t clusters = assignment._chunk_size / cluster_size;
916 assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
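          // For example, assuming the default 512-byte cards (64 heap words) and 64 cards
          // per cluster, each cluster spans 4096 words (32 KB) of the old region.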
917 scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
918 }
919
920 if (ShenandoahPacing) {
921 _heap->pacer()->report_update_refs(pointer_delta(end_of_range, start_of_range));
922 }
923 }
924 }
925 }
926
927 template<class T>
928 void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
929 const ShenandoahHeapRegion* r, HeapWord* start_of_range,
930 HeapWord* end_of_range) const {
931 // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
932 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
933
934 // Any object that begins in a previous range is part of a different scanning assignment. Any object that
935 // starts after end_of_range is also not my responsibility. (Either allocated during evacuation, so does
936 // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
937
938 // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
939 // when no live object is found in the range.
940 HeapWord* tams = ctx->top_at_mark_start(r);
941 HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);
942
943 while (p < end_of_range) {
944 // p is known to point to the beginning of marked object obj
945 oop obj = cast_to_oop(p);
946 objs.do_object(obj);
947 HeapWord* prev_p = p;
948 p += obj->size();
949 if (p < tams) {
950 p = ctx->get_next_marked_addr(p, tams);
951 // If there are no more marked objects before tams, this returns tams. Note that tams is
952 // either >= end_of_range, or tams is the start of an object that is marked.
953 }
954 assert(p != prev_p, "Lack of forward progress");
955 }
956 }
957
958 HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
959 HeapWord* start_of_range, HeapWord* end_of_range) const {
960 HeapWord* p = start_of_range;
961
962 if (p >= tams) {
963 // We cannot use ctx->is_marked(obj) to test whether an object begins at this address. Instead,
964 // we need to use the remembered set crossing map to advance p to the first object that starts
965 // within the enclosing card.
966 size_t card_index = scanner->card_index_for_addr(start_of_range);
967 while (true) {
968 HeapWord* first_object = scanner->first_object_in_card(card_index);
969 if (first_object != nullptr) {
970 p = first_object;
971 break;
972 } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
973 card_index++;
974 } else {
975 // Signal that no object was found in range
976 p = end_of_range;
977 break;
978 }
979 }
980 } else if (!ctx->is_marked(cast_to_oop(p))) {
981 p = ctx->get_next_marked_addr(p, tams);
982 // If there are no more marked objects before tams, this returns tams.
983 // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
984 }
985 return p;
986 }
987 };
988
989 void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
990 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
991 const uint nworkers = workers()->active_workers();
992 ShenandoahRegionChunkIterator work_list(nworkers);
993 if (concurrent) {
994 ShenandoahGenerationalUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
995 workers()->run_task(&task);
996 } else {
997 ShenandoahGenerationalUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
998 workers()->run_task(&task);
999 }
1000
1001 if (ShenandoahEnableCardStats) {
1002 // Only do this if we are collecting card stats
1003 ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
1004 assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
1005 card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
1006 }
1007 }
1008
1009 struct ShenandoahCompositeRegionClosure {
1010 template<typename C1, typename C2>
1011 class Closure : public ShenandoahHeapRegionClosure {
1012 private:
1013 C1 &_c1;
1014 C2 &_c2;
1015
1016 public:
1017 Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}
1018
1019 void heap_region_do(ShenandoahHeapRegion* r) override {
1020 _c1.heap_region_do(r);
1021 _c2.heap_region_do(r);
1022 }
1023
1024 bool is_thread_safe() override {
1025 return _c1.is_thread_safe() && _c2.is_thread_safe();
1026 }
1027 };
1028
1029 template<typename C1, typename C2>
1030 static Closure<C1, C2> of(C1 &c1, C2 &c2) {
1031 return Closure<C1, C2>(c1, c2);
1032 }
1033 };
1034
1035 class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
1036 private:
1037 ShenandoahMarkingContext* _ctx;
1038
1039 public:
1040 explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }
1041
1042 void heap_region_do(ShenandoahHeapRegion* r) override {
1043 // Maintenance of region age must follow evacuation in order to account for
1044 // evacuation allocations within survivor regions. We consult region age during
1045 // the subsequent evacuation to determine whether certain objects need to
1046 // be promoted.
1047 if (r->is_young() && r->is_active()) {
1048 HeapWord *tams = _ctx->top_at_mark_start(r);
1049 HeapWord *top = r->top();
1050
      // Allocations move the watermark when top moves. However, compacting
      // objects will sometimes lower top beneath the watermark, after which
      // attempts to read the watermark will assert out (the watermark should
      // not be higher than top).
1055 if (top > tams) {
1056 // There have been allocations in this region since the start of the cycle.
1057 // Any objects new to this region must not assimilate elevated age.
1058 r->reset_age();
1059 } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
1060 r->increment_age();
1061 }
1062 }
1063 }
1064
1065 bool is_thread_safe() override {
1066 return true;
1067 }
1068 };
1069
1070 void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
1071 ShenandoahSynchronizePinnedRegionStates pins;
1072 ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
1073 auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
1074 parallel_heap_region_iterate(&cl);
1075 }
1076
1077 void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
1078 shenandoah_assert_heaplocked_or_safepoint();
1079 if (is_concurrent_old_mark_in_progress()) {
1080 // This is still necessary for degenerated cycles because the degeneration point may occur
1081 // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_update_refs for
1082 // a more detailed explanation.
1083 old_generation()->transfer_pointers_from_satb();
1084 }
1085
1086 // We defer generation resizing actions until after cset regions have been recycled.
1087 TransferResult result = balance_generations();
1088 LogTarget(Info, gc, ergo) lt;
1089 if (lt.is_enabled()) {
1090 LogStream ls(lt);
1091 result.print_on("Degenerated GC", &ls);
1092 }
1093
1094 // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
1095 // transient state. Otherwise, these actions have no effect.
1096 reset_generation_reserves();
1097
1098 if (!old_generation()->is_parsable()) {
1099 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
1100 coalesce_and_fill_old_regions(false);
1101 }
1102 }
1103
1104 void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
1105 if (!old_generation()->is_parsable()) {
1106 // Class unloading may render the card offsets unusable, so we must rebuild them before
1107 // the next remembered set scan. We _could_ let the control thread do this sometime after
1108 // the global cycle has completed and before the next young collection, but under memory
1109 // pressure the control thread may not have the time (that is, because it's running back
1110 // to back GCs). In that scenario, we would have to make the old regions parsable before
1111 // we could start a young collection. This could delay the start of the young cycle and
1112 // throw off the heuristics.
1113 entry_global_coalesce_and_fill();
1114 }
1115
1116 TransferResult result;
1117 {
1118 ShenandoahHeapLocker locker(lock());
1119
1120 result = balance_generations();
1121 reset_generation_reserves();
1122 }
1123
1124 LogTarget(Info, gc, ergo) lt;
1125 if (lt.is_enabled()) {
1126 LogStream ls(lt);
1127 result.print_on("Concurrent GC", &ls);
1128 }
1129 }
1130
1131 void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
1132 const char* msg = "Coalescing and filling old regions";
1133 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);
1134
1135 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
1136 EventMark em("%s", msg);
1137 ShenandoahWorkerScope scope(workers(),
1138 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
1139 "concurrent coalesce and fill");
1140
1141 coalesce_and_fill_old_regions(true);
1142 }
1143
1144 void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
1145 ShenandoahUpdateRegionAges cl(ctx);
1146 parallel_heap_region_iterate(&cl);
1147 }