1 /*
2 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
3 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
27 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
29 #include "gc/shenandoah/shenandoahFreeSet.hpp"
30 #include "gc/shenandoah/shenandoahGeneration.hpp"
31 #include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
32 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
33 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
35 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
36 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
37 #include "gc/shenandoah/shenandoahInitLogger.hpp"
38 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
39 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
40 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
41 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
42 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
43 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
44 #include "gc/shenandoah/shenandoahUtils.hpp"
45 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
46 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
47 #include "logging/log.hpp"
48 #include "utilities/events.hpp"
49
50
51 class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
52 public:
53 static void print() {
54 ShenandoahGenerationalInitLogger logger;
55 logger.print_all();
56 }
57 protected:
58 void print_gc_specific() override {
59 ShenandoahInitLogger::print_gc_specific();
60
61 ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
62 log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
63 log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
64 }
65 };
66
67 size_t ShenandoahGenerationalHeap::calculate_min_plab() {
68 return align_up(PLAB::min_size(), CardTable::card_size_in_words());
69 }
70
71 size_t ShenandoahGenerationalHeap::calculate_max_plab() {
72 size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
73 return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
74 }
75
76 // Returns size in bytes
77 size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc() const {
78 return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
79 }
80
ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  ShenandoahHeap(policy),
  _age_census(nullptr),             // Created later, in post_initialize()
  _min_plab_size(calculate_min_plab()),
  _max_plab_size(calculate_max_plab()),
  _regulator_thread(nullptr),       // Created later, in initialize_controller()
  _young_gen_memory_pool(nullptr),  // Pools created in initialize_serviceability()
  _old_gen_memory_pool(nullptr) {
  // PLAB size bounds are computed card-aligned (see calculate_min_plab/calculate_max_plab);
  // verify that here so later card-granular bookkeeping can rely on it.
  assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
}
92
void ShenandoahGenerationalHeap::initialize_generations() {
  ShenandoahHeap::initialize_generations();
  // Give each generation a chance to complete any setup that requires the heap instance.
  _young_generation->post_initialize(this);
  _old_generation->post_initialize(this);
}
98
void ShenandoahGenerationalHeap::post_initialize() {
  ShenandoahHeap::post_initialize();
  // The age census drives tenuring decisions (see the is_tenurable() check
  // in evacuate_object()).
  _age_census = new ShenandoahAgeCensus();
}
103
void ShenandoahGenerationalHeap::post_initialize_heuristics() {
  // Complete heuristics setup for the global generation, then for young and old.
  ShenandoahHeap::post_initialize_heuristics();
  _young_generation->post_initialize_heuristics();
  _old_generation->post_initialize_heuristics();
}
109
110 void ShenandoahGenerationalHeap::print_init_logger() const {
111 ShenandoahGenerationalInitLogger logger;
112 logger.print_all();
113 }
114
void ShenandoahGenerationalHeap::initialize_heuristics() {
  // Initialize global generation and heuristics even in generational mode.
  ShenandoahHeap::initialize_heuristics();

  // Create the young and old generations (both sized for the full worker pool),
  // then set up each generation's heuristics for the current GC mode.
  _young_generation = new ShenandoahYoungGeneration(max_workers());
  _old_generation = new ShenandoahOldGeneration(max_workers());
  _young_generation->initialize_heuristics(mode());
  _old_generation->initialize_heuristics(mode());
}
124
// Create the per-generation memory pools and register each with both the
// concurrent-cycle and stop-the-world memory managers.
void ShenandoahGenerationalHeap::initialize_serviceability() {
  assert(mode()->is_generational(), "Only for the generational mode");
  _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
  _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
  cycle_memory_manager()->add_pool(_young_gen_memory_pool);
  cycle_memory_manager()->add_pool(_old_gen_memory_pool);
  stw_memory_manager()->add_pool(_young_gen_memory_pool);
  stw_memory_manager()->add_pool(_old_gen_memory_pool);
}
134
135 GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
136 assert(mode()->is_generational(), "Only for the generational mode");
137 GrowableArray<MemoryPool*> memory_pools(2);
138 memory_pools.append(_young_gen_memory_pool);
139 memory_pools.append(_old_gen_memory_pool);
140 return memory_pools;
141 }
142
void ShenandoahGenerationalHeap::initialize_controller() {
  // The regulator thread is constructed with a reference to the generational
  // control thread, so create the control thread first.
  auto control_thread = new ShenandoahGenerationalControlThread();
  _control_thread = control_thread;
  _regulator_thread = new ShenandoahRegulatorThread(control_thread);
}
148
// Apply tcl to the base heap's GC threads plus the generational-only regulator
// thread. Skipped entirely once shutdown has begun.
void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (!shenandoah_policy()->is_at_shutdown()) {
    ShenandoahHeap::gc_threads_do(tcl);
    tcl->do_thread(regulator_thread());
  }
}
155
void ShenandoahGenerationalHeap::stop() {
  // Stop the base heap's service threads first, then the regulator thread
  // that exists only in generational mode.
  ShenandoahHeap::stop();
  regulator_thread()->stop();
}
160
// Notify the young generation's heuristics that an idle span has begun.
void ShenandoahGenerationalHeap::start_idle_span() {
  young_generation()->heuristics()->start_idle_span();
}
164
// Decide whether accesses to the given stack chunk require GC barriers.
// The checks are evaluated in order; the first that applies wins.
bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) {
    // No GC is in progress, so no barriers are needed.
    return false;
  }

  if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
    // We are marking young, this object is in young, and it is below the TAMS
    return true;
  }

  if (is_in_old(obj)) {
    // Card marking barriers are required for objects in the old generation
    return true;
  }

  if (has_forwarded_objects()) {
    // Object may have pointers that need to be updated
    return true;
  }

  return false;
}
187
188 void ShenandoahGenerationalHeap::evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) {
189 ShenandoahRegionIterator regions;
190 ShenandoahGenerationalEvacuationTask task(this, generation, ®ions, concurrent, collection_set()->is_empty() /* only promote regions */);
191 workers()->run_task(&task);
192 }
193
194 void ShenandoahGenerationalHeap::promote_regions_in_place(ShenandoahGeneration* generation, bool concurrent) {
195 ShenandoahRegionIterator regions;
196 ShenandoahGenerationalEvacuationTask task(this, generation, ®ions, concurrent, true /* only promote regions */);
197 workers()->run_task(&task);
198 }
199
// Evacuate the object referenced by p on behalf of the current thread. The target
// generation matches the source region's affiliation, except that sufficiently aged
// young objects are first offered promotion to old. Returns the forwardee (which may
// be a pre-existing copy installed by another thread).
oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate anymore.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* from_region = heap_region_containing(p);
  assert(!from_region->is_humongous(), "never evacuate humongous objects");

  // Try to keep the object in the same generation
  const ShenandoahAffiliation target_gen = from_region->affiliation();

  if (target_gen == YOUNG_GENERATION) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }

    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (age_census()->is_tenurable(from_region->age() + mark.age())) {
      // If the object is tenurable, try to promote it
      oop result = try_evacuate_object<YOUNG_GENERATION, OLD_GENERATION>(p, thread, from_region->age());

      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
      if (result != nullptr) {
        return result;
      }
    }
    // Either the object is not tenurable or its promotion attempt failed: evacuate within young.
    return try_evacuate_object<YOUNG_GENERATION, YOUNG_GENERATION>(p, thread, from_region->age());
  }

  assert(target_gen == OLD_GENERATION, "Expected evacuation to old");
  return try_evacuate_object<OLD_GENERATION, OLD_GENERATION>(p, thread, from_region->age());
}
241
// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
//
// Attempts to copy p into TO_GENERATION, preferring the thread-local LAB (GCLAB for
// young, PLAB for old) and falling back to a shared allocation. Returns the winning
// forwardee (ours or another thread's); returns nullptr only when a young->old
// promotion attempt fails to allocate, in which case the caller retries within young.
template<ShenandoahAffiliation FROM_GENERATION, ShenandoahAffiliation TO_GENERATION>
oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint from_region_age) {
  bool alloc_from_lab = true;
  bool has_plab = false;
  HeapWord* copy = nullptr;
  size_t size = ShenandoahForwarding::size(p);
  constexpr bool is_promotion = (TO_GENERATION == OLD_GENERATION) && (FROM_GENERATION == YOUNG_GENERATION);

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      switch (TO_GENERATION) {
        case YOUNG_GENERATION: {
          copy = allocate_from_gclab(thread, size);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve. Try resetting
            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
            copy = allocate_from_gclab(thread, size);
            // If we still get nullptr, we'll try a shared allocation below.
          }
          break;
        }
        case OLD_GENERATION: {
          ShenandoahPLAB* shenandoah_plab = ShenandoahThreadLocalData::shenandoah_plab(thread);
          if (shenandoah_plab != nullptr) {
            has_plab = true;
            copy = shenandoah_plab->allocate(size, is_promotion);
            if (copy == nullptr && size < shenandoah_plab->desired_size() && shenandoah_plab->retries_enabled()) {
              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
              // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the
              // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
              // Shrinking the desired PLAB size may allow us to eke out a small PLAB while staying beneath evacuation reserve.
              if (shenandoah_plab->plab()->words_remaining() < plab_min_size()) {
                shenandoah_plab->set_desired_size(plab_min_size());
                copy = shenandoah_plab->allocate(size, is_promotion);
                if (copy == nullptr) {
                  // If we still get nullptr, we'll try a shared allocation below.
                  // However, don't continue to retry until we have success (probably in next GC pass)
                  shenandoah_plab->disable_retries();
                }
              }
            }
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, TO_GENERATION, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
      // We choose not to promote objects smaller than size_threshold by way of shared allocations as this is too
      // costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
      // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= size_threshhold).
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    // Both LAB and shared allocation failed (or failure was deliberately signaled above
    // for a small promotion). Handle by generation.
    if (TO_GENERATION == OLD_GENERATION) {
      if (FROM_GENERATION == YOUNG_GENERATION) {
        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
        old_generation()->handle_failed_promotion(thread, size);
        return nullptr;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        old_generation()->handle_failed_evacuation();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);
    oom_evac_handler()->handle_out_of_memory_during_evacuation();
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  if (ShenandoahEvacTracking) {
    evac_tracker()->begin_evacuation(thread, size * HeapWordSize, FROM_GENERATION, TO_GENERATION);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
  oop copy_val = cast_to_oop(copy);

  // Update the age of the evacuated object
  if (TO_GENERATION == YOUNG_GENERATION && is_aging_cycle()) {
    increase_object_age(copy_val, from_region_age + 1);
  }

  // Relativize stack chunks before publishing the copy. After the forwarding CAS,
  // mutators can see the copy and thaw it via the fast path if flags == 0. We must
  // relativize derived pointers and set gc_mode before that happens. Skip if the
  // copy's mark word is already a forwarding pointer (another thread won the race
  // and overwrote the original's header before we copied it).
  if (!ShenandoahForwarding::is_forwarded(copy_val)) {
    ContinuationGCSupport::relativize_stack_chunk(copy_val);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    if (ShenandoahEvacTracking) {
      // Record that the evacuation succeeded
      evac_tracker()->end_evacuation(thread, size * HeapWordSize, FROM_GENERATION, TO_GENERATION);
    }

    if (TO_GENERATION == OLD_GENERATION) {
      old_generation()->handle_evacuation(copy, size);
    }
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      switch (TO_GENERATION) {
        case YOUNG_GENERATION: {
          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
          break;
        }
        case OLD_GENERATION: {
          ShenandoahThreadLocalData::shenandoah_plab(thread)->plab()->undo_allocation(copy, size);
          if (is_promotion) {
            // Back out the promoted-bytes accounting done by the PLAB allocation.
            ShenandoahThreadLocalData::shenandoah_plab(thread)->subtract_from_promoted(size * HeapWordSize);
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
    }
  }
  shenandoah_assert_correct(nullptr, result);
  return result;
}
408
// Explicit instantiations for the three evacuation directions used by evacuate_object():
// young->young (in-generation evac), young->old (promotion), and old->old (mixed evac).
template oop ShenandoahGenerationalHeap::try_evacuate_object<YOUNG_GENERATION, YOUNG_GENERATION>(oop p, Thread* thread, uint from_region_age);
template oop ShenandoahGenerationalHeap::try_evacuate_object<YOUNG_GENERATION, OLD_GENERATION>(oop p, Thread* thread, uint from_region_age);
template oop ShenandoahGenerationalHeap::try_evacuate_object<OLD_GENERATION, OLD_GENERATION>(oop p, Thread* thread, uint from_region_age);
412
// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// mutator_xfer_limit, and any surplus is transferred to the young generation. mutator_xfer_limit is
// the maximum we're able to transfer from young to old. This is called at the end of GC, as we prepare
// for the idle span that precedes the next GC.
//
// Parameters:
//   mutator_xfer_limit    - upper bound (bytes) on memory transferable from young to old
//   old_trashed_regions   - count of old regions already trashed (their bytes count as available)
//   young_trashed_regions - count of young regions already trashed (likewise)
// Results are published via set_region_balance(), set_evacuation_reserve() and
// set_promoted_reserve() on the two generations; nothing is returned.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit,
                                                                size_t old_trashed_regions, size_t young_trashed_regions) {
  shenandoah_assert_heaplocked();
  // We can limit the old reserve to the size of anticipated promotions:
  // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacPercent,
  //      OE = old evac,
  //      YE = young evac, and
  //      TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)     // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>             OE  = YE*SOEP/(100-SOEP)

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacPercent <= 100, "Error");
  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // Available old memory: unused capacity plus the trashed regions about to be reclaimed.
  ShenandoahOldGeneration* old_gen = old_generation();
  size_t old_capacity = old_gen->max_capacity();
  size_t old_usage = old_gen->used();   // includes humongous waste
  size_t old_currently_available =
    ((old_capacity >= old_usage)? old_capacity - old_usage: 0) + old_trashed_regions * region_size_bytes;

  // Available young memory, clamped by what the free set actually has, plus trashed regions.
  ShenandoahYoungGeneration* young_gen = young_generation();
  size_t young_capacity = young_gen->max_capacity();
  size_t young_usage = young_gen->used();   // includes humongous waste
  size_t young_available = ((young_capacity >= young_usage)? young_capacity - young_usage: 0);
  size_t freeset_available = free_set()->available_locked();
  if (young_available > freeset_available) {
    young_available = freeset_available;
  }
  young_available += young_trashed_regions * region_size_bytes;

  // The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve)
  size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

  // If ShenandoahOldEvacPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve
  const size_t bound_on_old_reserve =
    ((old_currently_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacPercent) / 100;
  size_t proposed_max_old = ((ShenandoahOldEvacPercent == 100)?
                             bound_on_old_reserve:
                             MIN2((young_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
                                  bound_on_old_reserve));
  assert(mutator_xfer_limit <= young_available,
         "Cannot transfer (%zu) memory that is not available (%zu)", mutator_xfer_limit, young_available);
  // Young reserves are to be taken out of the mutator_xfer_limit.
  if (young_reserve > mutator_xfer_limit) {
    young_reserve = mutator_xfer_limit;
  }
  mutator_xfer_limit -= young_reserve;

  // Decide how much old space we should reserve for a mixed collection
  size_t proposed_reserve_for_mixed = 0;
  // Fragmented available: old memory that is free but not in whole unaffiliated/trashed regions.
  const size_t old_fragmented_available =
    old_currently_available - (old_generation()->free_unaffiliated_regions() + old_trashed_regions) * region_size_bytes;

  if (old_fragmented_available > proposed_max_old) {
    // In this case, the old_fragmented_available is greater than the desired amount of evacuation to old.
    // We'll use all of this memory to hold results of old evacuation, and we'll give back to the young generation
    // any old regions that are not fragmented.
    //
    // This scenario may happen after we have promoted many regions in place, and each of these regions had non-zero
    // unused memory, so there is now an abundance of old-fragmented available memory, even more than the desired
    // percentage for old reserve. We cannot transfer these fragmented regions back to young. Instead we make the
    // best of the situation by using this fragmented memory for both promotions and evacuations.

    proposed_max_old = old_fragmented_available;
  }
  // Otherwise: old_fragmented_available <= proposed_max_old. Do not shrink proposed_max_old from the original computation.

  // Though we initially set proposed_reserve_for_promo to equal the entirety of old fragmented available, we have the
  // opportunity below to shift some of this memory into the proposed_reserve_for_mixed.
  size_t proposed_reserve_for_promo = old_fragmented_available;
  const size_t max_old_reserve = proposed_max_old;

  const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory();
  const bool doing_mixed = (mixed_candidate_live_memory > 0);
  if (doing_mixed) {
    // In the ideal, all of the memory reserved for mixed evacuation would be unfragmented, but we don't enforce
    // this. Note that the initial value of max_evac_need is conservative because we may not evacuate all of the
    // remaining mixed evacuation candidates in a single cycle.
    const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste);
    assert(old_currently_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must be less than total available");

    // We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless
    // we already have too much fragmented available memory in old.
    proposed_reserve_for_mixed = max_evac_need;
    if (proposed_reserve_for_mixed + proposed_reserve_for_promo > max_old_reserve) {
      // We're trying to reserve more memory than is available. So we need to shrink our reserves.
      size_t excess_reserves = (proposed_reserve_for_mixed + proposed_reserve_for_promo) - max_old_reserve;
      // We need to shrink reserves by excess_reserves. We prefer to shrink by reducing promotion, giving priority to mixed
      // evacuation. If the promotion reserve is larger than the amount we need to shrink by, do all the shrinkage there.
      if (proposed_reserve_for_promo > excess_reserves) {
        proposed_reserve_for_promo -= excess_reserves;
      } else {
        // Otherwise, we'll shrink promotion reserve to zero and we'll shrink the mixed-evac reserve by the remaining excess.
        excess_reserves -= proposed_reserve_for_promo;
        proposed_reserve_for_promo = 0;
        proposed_reserve_for_mixed -= excess_reserves;
      }
    }
  }
  assert(proposed_reserve_for_mixed + proposed_reserve_for_promo <= max_old_reserve,
         "Reserve for mixed (%zu) plus reserve for promotions (%zu) must be less than maximum old reserve (%zu)",
         proposed_reserve_for_mixed, proposed_reserve_for_promo, max_old_reserve);

  // Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacations
  // over promotions.
  const size_t promo_load = old_generation()->get_promotion_potential();
  const bool doing_promotions = promo_load > 0;

  // promo_load represents the combined total of live memory within regions that have reached tenure age. The true
  // promotion potential is larger than this, because individual objects within regions that have not yet reached tenure
  // age may be promotable. On the other hand, some of the objects that we intend to promote in the next GC cycle may
  // die before they are next marked. In the future, the promo_load will include the total size of tenurable objects
  // residing in regions that have not yet reached tenure age.

  if (doing_promotions) {
    // We are always doing promotions, even when old_generation->get_promotion_potential() returns 0. As currently implemented,
    // get_promotion_potential() only knows the total live memory contained within young-generation regions whose age is
    // tenurable. It does not know whether that memory will still be live at the end of the next mark cycle, and it doesn't
    // know how much memory is contained within objects whose individual ages are tenurable, which reside in regions with
    // non-tenurable age. We use this, as adjusted by ShenandoahPromoEvacWaste, as an approximation of the total amount of
    // memory to be promoted. In the near future, we expect to implement a change that will allow get_promotion_potential()
    // to account also for the total memory contained within individual objects that are tenure-ready even when they do
    // not reside in aged regions. This will represent a conservative over approximation of promotable memory because
    // some of these objects may die before the next GC cycle executes.

    // Be careful not to ask for too much promotion reserves. We have observed jtreg test failures under which a greedy
    // promotion reserve causes a humongous allocation which is awaiting a full GC to fail (specifically
    // gc/TestAllocHumongousFragment.java). This happens if too much of the memory reclaimed by the full GC
    // is immediately reserved so that it cannot be allocated by the waiting mutator. It's not clear that this
    // particular test is representative of the needs of typical GenShen users. It is really a test of high frequency
    // Full GCs under heap fragmentation stress.

    size_t promo_need = (size_t) (promo_load * ShenandoahPromoEvacWaste);
    if (promo_need > proposed_reserve_for_promo) {
      // Grow the promotion reserve toward promo_need, but never beyond what max_old_reserve still allows.
      const size_t available_for_additional_promotions =
        max_old_reserve - (proposed_reserve_for_mixed + proposed_reserve_for_promo);
      if (proposed_reserve_for_promo + available_for_additional_promotions >= promo_need) {
        proposed_reserve_for_promo = promo_need;
      } else {
        proposed_reserve_for_promo += available_for_additional_promotions;
      }
    }
  }
  // else, leave proposed_reserve_for_promo as is. By default, it is initialized to represent old_fragmented_available.

  // This is the total old we want to reserve (initialized to the ideal reserve)
  size_t proposed_old_reserve = proposed_reserve_for_mixed + proposed_reserve_for_promo;

  // We now check if the old generation is running a surplus or a deficit.
  size_t old_region_deficit = 0;
  size_t old_region_surplus = 0;

  size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes;
  // align the mutator_xfer_limit on region size
  mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes;

  if (old_currently_available >= proposed_old_reserve) {
    // We are running a surplus, so the old region surplus can go to young
    const size_t old_surplus = old_currently_available - proposed_old_reserve;
    old_region_surplus = old_surplus / region_size_bytes;
    // Only whole unaffiliated (or trashed) regions can be transferred.
    const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_trashed_regions;
    old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions);
    old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
    old_currently_available -= old_region_surplus * region_size_bytes;
    young_available += old_region_surplus * region_size_bytes;
  } else if (old_currently_available + mutator_xfer_limit >= proposed_old_reserve) {
    // We know that old_currently_available < proposed_old_reserve because above test failed. Expand old_currently_available.
    // Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there.
    size_t old_deficit = proposed_old_reserve - old_currently_available;
    old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
    old_currently_available += old_region_deficit * region_size_bytes;
    young_available -= old_region_deficit * region_size_bytes;
  } else {
    // We know that (old_currently_available < proposed_old_reserve) and
    // (old_currently_available + mutator_xfer_limit < proposed_old_reserve) because above tests failed.
    // We need to shrink proposed_old_reserves.

    // We could potentially shrink young_reserves in order to further expand proposed_old_reserves. Let's not bother. The
    // important thing is that we keep a total amount of memory in reserve in preparation for the next GC cycle. At
    // the time we choose the next collection set, we'll have an opportunity to shift some of these young reserves
    // into old reserves if that makes sense.

    // Start by taking all of mutator_xfer_limit into old_currently_available.
    // NOTE(review): this declaration shadows the old_region_deficit declared above, so the
    // outer variable remains 0 in this branch and the surplus/deficit assert below does not
    // observe this transfer — confirm the shadowing is intended.
    size_t old_region_deficit = mutator_region_xfer_limit;
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
    old_currently_available += old_region_deficit * region_size_bytes;
    young_available -= old_region_deficit * region_size_bytes;

    assert(old_currently_available < proposed_old_reserve,
           "Old currently available (%zu) must be less than old reserve (%zu)", old_currently_available, proposed_old_reserve);

    // There's not enough memory to satisfy our desire. Scale back our old-gen intentions. We prefer to satisfy
    // the budget_overrun entirely from the promotion reserve, if that is large enough. Otherwise, we'll satisfy
    // the overrun from a combination of promotion and mixed-evacuation reserves.
    size_t budget_overrun = proposed_old_reserve - old_currently_available;
    if (proposed_reserve_for_promo > budget_overrun) {
      proposed_reserve_for_promo -= budget_overrun;
      // Dead code:
      // proposed_old_reserve -= budget_overrun;
    } else {
      budget_overrun -= proposed_reserve_for_promo;
      proposed_reserve_for_promo = 0;
      proposed_reserve_for_mixed = (proposed_reserve_for_mixed > budget_overrun)? proposed_reserve_for_mixed - budget_overrun: 0;
      // Dead code:
      //  Note: proposed_reserve_for_promo is 0 and proposed_reserve_for_mixed may equal 0.
      // proposed_old_reserve = proposed_reserve_for_mixed;
    }
  }

  assert(old_region_deficit == 0 || old_region_surplus == 0,
         "Only surplus (%zu) or deficit (%zu), never both", old_region_surplus, old_region_deficit);
  assert(young_reserve + proposed_reserve_for_mixed + proposed_reserve_for_promo <= old_currently_available + young_available,
         "Cannot reserve more memory than is available: %zu + %zu + %zu <= %zu + %zu",
         young_reserve, proposed_reserve_for_mixed, proposed_reserve_for_promo, old_currently_available, young_available);

  // deficit/surplus adjustments to generation sizes will precede rebuild
  young_generation()->set_evacuation_reserve(young_reserve);
  old_generation()->set_evacuation_reserve(proposed_reserve_for_mixed);
  old_generation()->set_promoted_reserve(proposed_reserve_for_promo);
}
649
650 void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
651 class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
652 private:
653 ShenandoahPhaseTimings::Phase _phase;
654 ShenandoahRegionIterator _regions;
655 public:
656 explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
657 WorkerTask("Shenandoah Global Coalesce"),
658 _phase(phase) {}
659
660 void work(uint worker_id) override {
661 ShenandoahWorkerTimingsTracker timer(_phase,
662 ShenandoahPhaseTimings::ScanClusters,
663 worker_id, true);
664 ShenandoahHeapRegion* region;
665 while ((region = _regions.next()) != nullptr) {
666 // old region is not in the collection set and was not immediately trashed
667 if (region->is_old() && region->is_active() && !region->is_humongous()) {
668 // Reset the coalesce and fill boundary because this is a global collect
669 // and cannot be preempted by young collects. We want to be sure the entire
670 // region is coalesced here and does not resume from a previously interrupted
671 // or completed coalescing.
672 region->begin_preemptible_coalesce_and_fill();
673 region->oop_coalesce_and_fill(false);
674 }
675 }
676 }
677 };
678
679 ShenandoahPhaseTimings::Phase phase = concurrent ?
680 ShenandoahPhaseTimings::conc_coalesce_and_fill :
681 ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;
682
683 // This is not cancellable
684 ShenandoahGlobalCoalesceAndFill coalesce(phase);
685 workers()->run_task(&coalesce);
686 old_generation()->set_parsable(true);
687 }
688
// Parallel worker task that updates all remaining from-space references after
// evacuation. Each worker first walks ordinary regions from a shared iterator,
// then (for young collections) processes chunked remembered-set work so that
// the tail of the phase is well balanced across threads. CONCURRENT selects
// between the concurrent closure (with suspendible-thread-set joining) and the
// stop-the-world closure used by degenerated cycles.
template<bool CONCURRENT>
class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
private:
  // For update refs, _generation will be young or global. Mixed collections use the young generation.
  ShenandoahGeneration* _generation;
  ShenandoahGenerationalHeap* _heap;
  ShenandoahRegionIterator* _regions;       // shared iterator over all regions (first phase)
  ShenandoahRegionChunkIterator* _work_chunks; // shared iterator over remembered-set chunks (second phase)

public:
  // regions and work_chunks are shared among all workers; each call to next()
  // hands out a disjoint piece of work.
  ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahGeneration* generation,
                                           ShenandoahRegionIterator* regions,
                                           ShenandoahRegionChunkIterator* work_chunks) :
    WorkerTask("Shenandoah Update References"),
    _generation(generation),
    _heap(ShenandoahGenerationalHeap::heap()),
    _regions(regions),
    _work_chunks(work_chunks)
  {
    const bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
    log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
  }

  void work(uint worker_id) override {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
    }
  }

private:
  // Main per-worker loop: update references region by region, then fall
  // through to remembered-set processing for young collections.
  template<class T>
  void do_work(uint worker_id) {
    T cl;

    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
      // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
      // next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set

    ShenandoahHeapRegion* r = _regions->next();
    // We update references for global, mixed, and young collections.
    assert(_generation->is_mark_complete(), "Expected complete marking");
    ShenandoahMarkingContext* const ctx = _heap->marking_context();
    bool is_mixed = _heap->collection_set()->has_old_regions();
    while (r != nullptr) {
      // Only references below the update watermark can point into from-space;
      // objects allocated above it were allocated during this GC.
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");

      log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region %zu", worker_id, r->index());
      if (r->is_active() && !r->is_cset()) {
        if (r->is_young()) {
          _heap->marked_object_oop_iterate(r, &cl, update_watermark);
        } else if (r->is_old()) {
          if (_generation->is_global()) {

            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
          }
          // Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
        } else {
          // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
          // to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
          // active status may propagate at a different speed than the changing of the region's affiliation.

          // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
          // by this thread before the region's affiliation() is seen by this thread.

          // It's ok for this race to occur because the newly transformed region does not have any references to be
          // updated.

          assert(r->get_update_watermark() == r->bottom(),
                 "%s Region %zu is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
                 r->affiliation_name(), r->index());
        }
      }

      // Bail out promptly if the GC has been cancelled (and, when concurrent,
      // yield to a pending safepoint).
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }

      r = _regions->next();
    }

    if (_generation->is_young()) {
      // Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
      // set processing if not in generational mode or if GLOBAL mode.

      // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
      // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
      // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
      update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
    }
  }

  // Second phase (young collections only): process old regions in
  // chunk-sized assignments taken from the shared chunk iterator.
  template<class T>
  void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {

    struct ShenandoahRegionChunk assignment;
    ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();

    while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
      // Keep grabbing next work chunk to process until finished, or asked to yield
      ShenandoahHeapRegion* r = assignment._r;
      if (r->is_active() && !r->is_cset() && r->is_old()) {
        // Clamp this chunk's range to the region's update watermark; nothing
        // above the watermark can hold from-space references.
        HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
        HeapWord* end_of_range = r->get_update_watermark();
        if (end_of_range > start_of_range + assignment._chunk_size) {
          end_of_range = start_of_range + assignment._chunk_size;
        }

        if (start_of_range >= end_of_range) {
          continue;
        }

        // Old region in a young cycle or mixed cycle.
        if (is_mixed) {
          if (r->is_humongous()) {
            // Need to examine both dirty and clean cards during mixed evac.
            r->oop_iterate_humongous_slice_all(&cl,start_of_range, assignment._chunk_size);
          } else {
            // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
            // and filled. This will use mark bits to find objects that need to be updated.
            update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
          }
        } else {
          // This is a young evacuation
          size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
          size_t clusters = assignment._chunk_size / cluster_size;
          assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
          scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
        }
      }
    }
  }

  // Walk marked objects within [start_of_range, end_of_range) of an old region
  // that has not been coalesced and filled, applying cl to each.
  template<class T>
  void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
                                       const ShenandoahHeapRegion* r, HeapWord* start_of_range,
                                       HeapWord* end_of_range) const {
    // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
    ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());

    // Any object that begins in a previous range is part of a different scanning assignment. Any object that
    // starts after end_of_range is also not my responsibility. (Either allocated during evacuation, so does
    // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)

    // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
    // when no live object is found in the range.
    HeapWord* tams = ctx->top_at_mark_start(r);
    HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);

    while (p < end_of_range) {
      // p is known to point to the beginning of marked object obj
      oop obj = cast_to_oop(p);
      objs.do_object(obj);
      HeapWord* prev_p = p;
      p += obj->size();
      if (p < tams) {
        p = ctx->get_next_marked_addr(p, tams);
        // If there are no more marked objects before tams, this returns tams. Note that tams is
        // either >= end_of_range, or tams is the start of an object that is marked.
      }
      assert(p != prev_p, "Lack of forward progress");
    }
  }

  // Find the start of the first object beginning at or after start_of_range.
  // Below tams the mark bitmap is authoritative; at or above tams the
  // remembered-set crossing map is used instead. Returns end_of_range when no
  // object starts within the range.
  HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
                                        HeapWord* start_of_range, HeapWord* end_of_range) const {
    HeapWord* p = start_of_range;

    if (p >= tams) {
      // We cannot use ctx->is_marked(obj) to test whether an object begins at this address. Instead,
      // we need to use the remembered set crossing map to advance p to the first object that starts
      // within the enclosing card.
      size_t card_index = scanner->card_index_for_addr(start_of_range);
      while (true) {
        HeapWord* first_object = scanner->first_object_in_card(card_index);
        if (first_object != nullptr) {
          p = first_object;
          break;
        } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
          card_index++;
        } else {
          // Signal that no object was found in range
          p = end_of_range;
          break;
        }
      }
    } else if (!ctx->is_marked(cast_to_oop(p))) {
      p = ctx->get_next_marked_addr(p, tams);
      // If there are no more marked objects before tams, this returns tams.
      // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
    }
    return p;
  }
};
897
898 void ShenandoahGenerationalHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
899 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
900 const uint nworkers = workers()->active_workers();
901 ShenandoahRegionChunkIterator work_list(nworkers);
902 if (concurrent) {
903 ShenandoahGenerationalUpdateHeapRefsTask<true> task(generation, &_update_refs_iterator, &work_list);
904 workers()->run_task(&task);
905 } else {
906 ShenandoahGenerationalUpdateHeapRefsTask<false> task(generation, &_update_refs_iterator, &work_list);
907 workers()->run_task(&task);
908 }
909
910 if (ShenandoahEnableCardStats) {
911 // Only do this if we are collecting card stats
912 ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
913 assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
914 card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
915 }
916 }
917
918 struct ShenandoahCompositeRegionClosure {
919 template<typename C1, typename C2>
920 class Closure : public ShenandoahHeapRegionClosure {
921 private:
922 C1 &_c1;
923 C2 &_c2;
924
925 public:
926 Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}
927
928 void heap_region_do(ShenandoahHeapRegion* r) override {
929 _c1.heap_region_do(r);
930 _c2.heap_region_do(r);
931 }
932
933 bool is_thread_safe() override {
934 return _c1.is_thread_safe() && _c2.is_thread_safe();
935 }
936 };
937
938 template<typename C1, typename C2>
939 static Closure<C1, C2> of(C1 &c1, C2 &c2) {
940 return Closure<C1, C2>(c1, c2);
941 }
942 };
943
944 class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
945 private:
946 ShenandoahMarkingContext* _ctx;
947
948 public:
949 explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }
950
951 void heap_region_do(ShenandoahHeapRegion* r) override {
952 // Maintenance of region age must follow evacuation in order to account for
953 // evacuation allocations within survivor regions. We consult region age during
954 // the subsequent evacuation to determine whether certain objects need to
955 // be promoted.
956 if (r->is_young() && r->is_active()) {
957 HeapWord *tams = _ctx->top_at_mark_start(r);
958 HeapWord *top = r->top();
959
960 // Allocations move the watermark when top moves. However, compacting
961 // objects will sometimes lower top beneath the watermark, after which,
962 // attempts to read the watermark will assert out (watermark should not be
963 // higher than top).
964 if (top > tams) {
965 // There have been allocations in this region since the start of the cycle.
966 // Any objects new to this region must not assimilate elevated age.
967 r->reset_age();
968 } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
969 r->increment_age();
970 }
971 }
972 }
973
974 bool is_thread_safe() override {
975 return true;
976 }
977 };
978
979 void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
980 ShenandoahSynchronizePinnedRegionStates pins;
981 ShenandoahUpdateRegionAges ages(marking_context());
982 auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
983 parallel_heap_region_iterate(&cl);
984 }
985
986 void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
987 shenandoah_assert_heaplocked_or_safepoint();
988 if (!old_generation()->is_parsable()) {
989 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
990 coalesce_and_fill_old_regions(false);
991 }
992
993 old_generation()->maybe_log_promotion_failure_stats(false);
994 }
995
996 void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
997 if (!old_generation()->is_parsable()) {
998 // Class unloading may render the card offsets unusable, so we must rebuild them before
999 // the next remembered set scan. We _could_ let the control thread do this sometime after
1000 // the global cycle has completed and before the next young collection, but under memory
1001 // pressure the control thread may not have the time (that is, because it's running back
1002 // to back GCs). In that scenario, we would have to make the old regions parsable before
1003 // we could start a young collection. This could delay the start of the young cycle and
1004 // throw off the heuristics.
1005 entry_global_coalesce_and_fill();
1006 }
1007
1008 old_generation()->maybe_log_promotion_failure_stats(true);
1009 }
1010
1011 void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
1012 const char* msg = "Coalescing and filling old regions";
1013 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);
1014
1015 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
1016 EventMark em("%s", msg);
1017 ShenandoahWorkerScope scope(workers(),
1018 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
1019 "concurrent coalesce and fill");
1020
1021 coalesce_and_fill_old_regions(true);
1022 }
1023
1024 void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
1025 ShenandoahUpdateRegionAges cl(ctx);
1026 parallel_heap_region_iterate(&cl);
1027 }