/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

#ifdef ASSERT
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {
  assert(generation->used_regions_size() <= generation->max_capacity(),
         "%s generation affiliated regions must not exceed capacity", generation->name());
}

void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
  assert(generation->used() <= generation->used_regions_size(),
         "%s consumed can be no larger than span of affiliated regions", generation->name());
}
#else
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {}
void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {}
#endif


void ShenandoahGenerationalFullGC::prepare() {
  auto heap = ShenandoahGenerationalHeap::heap();
  // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
  heap->set_active_generation(heap->global_generation());

  // No need for old_gen->increase_used() as this was done when PLABs were allocated.
  heap->reset_generation_reserves();

  // Full GC supersedes any marking or coalescing in old generation.
  heap->old_generation()->cancel_gc();
}
void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
  // Full GC should reset time since last GC for young and old heuristics
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahYoungGeneration* young = gen_heap->young_generation();
  ShenandoahOldGeneration* old = gen_heap->old_generation();
  young->heuristics()->record_cycle_end();
  old->heuristics()->record_cycle_end();

  gen_heap->mmu_tracker()->record_full(GCId::current());
  gen_heap->log_heap_status("At end of Full GC");

  assert(old->is_idle(), "After full GC, old generation should be idle.");

  // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
  // made valid by the time Full GC completes.
  assert_regions_used_not_more_than_capacity(old);
  assert_regions_used_not_more_than_capacity(young);
  assert_usage_not_more_than_regions_used(old);
  assert_usage_not_more_than_regions_used(young);

  // Establish baseline for next old-has-grown trigger.
  old->set_live_bytes_after_last_mark(old->used());
}

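// Full GC compaction can move old objects to arbitrary new locations, which invalidates the card-level
// metadata the remembered set depends on. Rather than trying to maintain that metadata through
// compaction, we rebuild the remembered set from scratch by rescanning the old generation.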
void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);

  ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
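  // Shenandoah keeps two card tables: a read table consulted by remembered-set scanning and a write
  // table that collects mutator card marks. Cleaning the read table and then swapping makes a clean
  // table current, so the reconstruction task below re-dirties only the cards it finds necessary.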
  scanner->mark_read_table_as_clean();
  scanner->swap_card_tables();

  ShenandoahRegionIterator regions;
  ShenandoahReconstructRememberedSetTask task(&regions);
  heap->workers()->run_task(&task);

  // Rebuilding the remembered set recomputes all the card offsets for objects.
  // The adjust pointers phase coalesces and fills all necessary regions. In case
  // we came to the full GC from an incomplete global cycle, we need to indicate
  // that the old regions are parsable.
  heap->old_generation()->set_parsable(true);
}

void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
  LogTarget(Debug, gc) lt;
  if (lt.is_enabled()) {
    size_t live_bytes_in_old = 0;
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      if (r->is_old()) {
        live_bytes_in_old += r->get_live_data_bytes();
      }
    }
    log_debug(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old));
  }
}

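// Regions that were promoted in place remember the top they had at promotion time. Restoring that
// top lets full GC treat the whole region uniformly during compaction. (This reading is based on the
// accessor names; see ShenandoahHeapRegion for the authoritative semantics.)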
void ShenandoahGenerationalFullGC::restore_top_before_promote(ShenandoahHeap* heap) {
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->get_top_before_promote() != nullptr) {
      r->restore_top_before_promote();
    }
  }
}

void ShenandoahGenerationalFullGC::account_for_region(ShenandoahHeapRegion* r, size_t &region_count, size_t &region_usage, size_t &humongous_waste) {
  region_count++;
  region_usage += r->used();
  if (r->is_humongous_start()) {
    // For each humongous object, we take this path once regardless of how many regions it spans.
    HeapWord* obj_addr = r->bottom();
    oop obj = cast_to_oop(obj_addr);
    size_t word_size = obj->size();
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t overreach = word_size % region_size_words;
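    // Worked example (hypothetical sizes): with 2 MiB regions (region_size_words = 256K) and an
    // object of 600K words, the object spans three regions and overreach = 600K % 256K = 88K words,
    // so the tail region wastes (256K - 88K) = 168K words, i.e. 168K * HeapWordSize bytes.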
    if (overreach != 0) {
      humongous_waste += (region_size_words - overreach) * HeapWordSize;
    }
    // else, this humongous object aligns exactly on region size, so no waste.
  }
}

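// Pinned old regions cannot be compacted, so the dead space between their live objects is instead
// coalesced and filled with dummy objects, keeping the region parsable for subsequent card scans.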
void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeapRegion* r) {
  if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
    r->begin_preemptible_coalesce_and_fill();
    r->oop_coalesce_and_fill(false);
  }
}

void ShenandoahGenerationalFullGC::compute_balances() {
  auto heap = ShenandoahGenerationalHeap::heap();

  // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
  heap->old_generation()->set_promotion_potential(0);
  // Invoke this in case we are able to transfer memory from OLD to YOUNG.
  heap->compute_old_generation_balance(0, 0);
}

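// This closure plans the compaction. It maintains two independent compact points, one for OLD and
// one for YOUNG, so live objects are packed within their own generation. The exception is tenurable
// young objects, which are redirected to the old compact point (promotion by compaction).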
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* from_region, uint worker_id) :
        _preserved_marks(preserved_marks),
        _heap(ShenandoahGenerationalHeap::heap()),
        _empty_regions(empty_regions),
        _empty_regions_pos(0),
        _old_to_region(nullptr),
        _young_to_region(nullptr),
        _from_region(nullptr),
        _from_affiliation(ShenandoahAffiliation::FREE),
        _old_compact_point(nullptr),
        _young_compact_point(nullptr),
        _worker_id(worker_id) {
  assert(from_region != nullptr, "Worker needs from_region");
  // assert from_region has live?
  if (from_region->is_old()) {
    _old_to_region = from_region;
    _old_compact_point = from_region->bottom();
  } else if (from_region->is_young()) {
    _young_to_region = from_region;
    _young_compact_point = from_region->bottom();
  }
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::set_from_region(ShenandoahHeapRegion* from_region) {
  log_debug(gc)("Worker %u compacting %s Region %zu which had used %zu and %s live",
                _worker_id, from_region->affiliation_name(),
                from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");

  _from_region = from_region;
  _from_affiliation = from_region->affiliation();
  if (_from_region->has_live()) {
    if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) {
      if (_old_to_region == nullptr) {
        _old_to_region = from_region;
        _old_compact_point = from_region->bottom();
      }
    } else {
      assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
      if (_young_to_region == nullptr) {
        _young_to_region = from_region;
        _young_compact_point = from_region->bottom();
      }
    }
  } // else, we won't iterate over this _from_region so we don't need to set up a to-region to hold copies

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish() {
  finish_old_region();
  finish_young_region();
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region() {
  if (_old_to_region != nullptr) {
    log_debug(gc)("Planned compaction into Old Region %zu, used: %zu tabulated by worker %u",
            _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
    _old_to_region->set_new_top(_old_compact_point);
    _old_to_region = nullptr;
  }
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() {
  if (_young_to_region != nullptr) {
    log_debug(gc)("Worker %u planned compaction into Young Region %zu, used: %zu",
            _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
    _young_to_region->set_new_top(_young_compact_point);
    _young_to_region = nullptr;
  }
}

bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() {
  return (_from_region == _old_to_region) || (_from_region == _young_to_region);
}

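// For each live object we decide a destination: young objects that have reached the tenuring
// threshold are redirected to the old compact point (promotion); everything else is packed at the
// compact point of its own generation. When the current to-region fills up, we take the next empty
// region from our worker-local list, or fall back to reusing _from_region itself. Objects that move
// get a forwarding pointer installed; objects already at their compact point stay put.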
void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) {
  assert(_from_region != nullptr, "must set before work");
  assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
         "Object must reside in _from_region");
  assert(_heap->global_generation()->complete_marking_context()->is_marked(p), "must be marked");
  assert(!_heap->global_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

  size_t old_size = p->size();
  size_t new_size = p->copy_size(old_size, p->mark());
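  // copy_size() may report a size larger than size(): for example, under compact object headers an
  // object whose identity hash has been computed needs extra space for the hash field once it moves.
  // (This is our reading of the copy_size() contract; the exact rules live in the oop layout code.)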
  uint from_region_age = _from_region->age();
  uint object_age = p->age();

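  // An object's effective age is its own header age plus the age of the region holding it: region
  // age accumulates across cycles in which individual headers were not updated, so the sum, rather
  // than the header age alone, is compared against the census tenuring threshold.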
  bool promote_object = false;
  if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
      _heap->age_census()->is_tenurable(from_region_age + object_age)) {
    if ((_old_to_region != nullptr) && (_old_compact_point + new_size > _old_to_region->end())) {
      finish_old_region();
      _old_to_region = nullptr;
    }
    if (_old_to_region == nullptr) {
      if (_empty_regions_pos < _empty_regions.length()) {
        ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
        promote_object = true;
      }
      // Else this worker thread does not yet have any empty regions into which this aged object can be promoted so
      // we leave promote_object as false, deferring the promotion.
    } else {
      promote_object = true;
    }
  }

  if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
    assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
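    // If the object is already sitting at the compact point it will not move and keeps its current
    // size; otherwise it moves and must be given the (possibly larger) copy size.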
    size_t obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_old_compact_point + obj_size > _old_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing old region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _old_to_region->index(),
              p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

      // Object does not fit.  Get a new _old_to_region.
      finish_old_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
      } else {
        // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
        // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _old_to_region = new_to_region;
      _old_compact_point = _old_to_region->bottom();
      obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into the current region. Record its new location if the object will move:
    assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
    }
    _old_compact_point += obj_size;
  } else {
    assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
           "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
    assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

    // After full GC compaction, all regions have age 0.  Embed the region's age into the object's age in order to preserve
    // tenuring progress.
    if (_heap->is_aging_cycle()) {
      ShenandoahHeap::increase_object_age(p, from_region_age + 1);
    } else {
      ShenandoahHeap::increase_object_age(p, from_region_age);
    }

    size_t obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_young_compact_point + obj_size > _young_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing young region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _young_to_region->index(),
              p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

      // Object does not fit.  Get a new _young_to_region.
      finish_young_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(YOUNG_GENERATION);
      } else {
        // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
        // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _young_to_region = new_to_region;
      _young_compact_point = _young_to_region->bottom();
      obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into the current region. Record its new location if the object will move:
    assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);

    if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
    }
    _young_compact_point += obj_size;
  }
}