1 /*
  2  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  3  * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "gc/shared/fullGCForwarding.inline.hpp"
 27 #include "gc/shared/preservedMarks.inline.hpp"
 28 #include "gc/shenandoah/shenandoahGeneration.hpp"
 29 #include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
 30 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 33 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 34 #include "gc/shenandoah/shenandoahUtils.hpp"
 35 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 36 
#ifdef ASSERT
// Verifies that the combined size of all regions affiliated with this generation
// does not exceed the generation's maximum capacity.
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {
  assert(generation->used_regions_size() <= generation->max_capacity(),
         "%s generation affiliated regions must be less than capacity", generation->name());
}

// Verifies that the bytes consumed by this generation do not exceed the combined
// size of its affiliated regions (usage is contained within the affiliated span).
void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
  assert(generation->used() <= generation->used_regions_size(),
         "%s consumed can be no larger than span of affiliated regions", generation->name());
}
#else
// Product builds: these verification helpers compile to no-ops.
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {}
void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {}
#endif
 51 
 52 
 53 void ShenandoahGenerationalFullGC::prepare() {
 54   auto heap = ShenandoahGenerationalHeap::heap();
 55   // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
 56   heap->set_active_generation(heap->global_generation());
 57 
 58   // Full GC supersedes any marking or coalescing in old generation.
 59   heap->old_generation()->cancel_gc();
 60 }
 61 
 62 void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
 63   // Full GC should reset time since last gc for young and old heuristics
 64   ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
 65   ShenandoahYoungGeneration* young = gen_heap->young_generation();
 66   ShenandoahOldGeneration* old = gen_heap->old_generation();
 67   young->heuristics()->record_cycle_end();
 68   old->heuristics()->record_cycle_end();
 69 
 70   gen_heap->mmu_tracker()->record_full(GCId::current());
 71   gen_heap->log_heap_status("At end of Full GC");
 72 
 73   assert(old->is_idle(), "After full GC, old generation should be idle.");
 74 
 75   // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
 76   // made valid by the time Full GC completes.
 77   assert_regions_used_not_more_than_capacity(old);
 78   assert_regions_used_not_more_than_capacity(young);
 79   assert_usage_not_more_than_regions_used(old);
 80   assert_usage_not_more_than_regions_used(young);
 81 
 82   // Establish baseline for next old-has-grown trigger.
 83   old->set_live_bytes_at_last_mark(old->used());
 84 }
 85 
 86 void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) {
 87   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
 88 
 89   ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
 90   scanner->mark_read_table_as_clean();
 91   scanner->swap_card_tables();
 92 
 93   ShenandoahRegionIterator regions;
 94   ShenandoahReconstructRememberedSetTask task(&regions);
 95   heap->workers()->run_task(&task);
 96 
 97   // Rebuilding the remembered set recomputes all the card offsets for objects.
 98   // The adjust pointers phase coalesces and fills all necessary regions. In case
 99   // we came to the full GC from an incomplete global cycle, we need to indicate
100   // that the old regions are parsable.
101   heap->old_generation()->set_parsable(true);
102 }
103 
104 void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
105   LogTarget(Debug, gc) lt;
106   if (lt.is_enabled()) {
107     size_t live_bytes_in_old = 0;
108     for (size_t i = 0; i < heap->num_regions(); i++) {
109       ShenandoahHeapRegion* r = heap->get_region(i);
110       if (r->is_old()) {
111         live_bytes_in_old += r->get_live_data_bytes();
112       }
113     }
114     log_debug(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old));
115   }
116 }
117 
118 void ShenandoahGenerationalFullGC::restore_top_before_promote(ShenandoahHeap* heap) {
119   for (size_t i = 0; i < heap->num_regions(); i++) {
120     ShenandoahHeapRegion* r = heap->get_region(i);
121     if (r->get_top_before_promote() != nullptr) {
122       r->restore_top_before_promote();
123     }
124   }
125 }
126 
127 void ShenandoahGenerationalFullGC::account_for_region(ShenandoahHeapRegion* r, size_t &region_count, size_t &region_usage, size_t &humongous_waste) {
128   region_count++;
129   region_usage += r->used();
130   if (r->is_humongous_start()) {
131     // For each humongous object, we take this path once regardless of how many regions it spans.
132     HeapWord* obj_addr = r->bottom();
133     oop obj = cast_to_oop(obj_addr);
134     size_t word_size = obj->size();
135     size_t region_size_words = ShenandoahHeapRegion::region_size_words();
136     size_t overreach = word_size % region_size_words;
137     if (overreach != 0) {
138       humongous_waste += (region_size_words - overreach) * HeapWordSize;
139     }
140     // else, this humongous object aligns exactly on region size, so no waste.
141   }
142 }
143 
144 void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeapRegion* r) {
145   if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
146     r->begin_preemptible_coalesce_and_fill();
147     r->oop_coalesce_and_fill(false);
148   }
149 }
150 
151 void ShenandoahGenerationalFullGC::compute_balances() {
152   auto heap = ShenandoahGenerationalHeap::heap();
153 
154   // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
155   heap->old_generation()->set_promotion_potential(0);
156 
157   // Invoke this in case we are able to transfer memory from OLD to YOUNG
158   size_t allocation_runway =
159     heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0L);
160   heap->compute_old_generation_balance(allocation_runway, 0, 0);
161 }
162 
// Per-worker closure that plans compaction destinations for live objects.
// Initially, the from-region doubles as the to-region for its own generation:
// objects are planned to compact toward the from-region's bottom.
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* from_region, uint worker_id) :
        _preserved_marks(preserved_marks),
        _heap(ShenandoahGenerationalHeap::heap()),
        _empty_regions(empty_regions),
        _empty_regions_pos(0),
        _old_to_region(nullptr),
        _young_to_region(nullptr),
        _from_region(nullptr),
        _from_affiliation(ShenandoahAffiliation::FREE),
        _old_compact_point(nullptr),
        _young_compact_point(nullptr),
        _worker_id(worker_id) {
  assert(from_region != nullptr, "Worker needs from_region");
  // assert from_region has live?
  if (from_region->is_old()) {
    // Old from-region: plan old-generation compaction starting at its bottom.
    _old_to_region = from_region;
    _old_compact_point = from_region->bottom();
  } else if (from_region->is_young()) {
    // Young from-region: plan young-generation compaction starting at its bottom.
    _young_to_region = from_region;
    _young_compact_point = from_region->bottom();
  }
}
187 
188 void ShenandoahPrepareForGenerationalCompactionObjectClosure::set_from_region(ShenandoahHeapRegion* from_region) {
189   log_debug(gc)("Worker %u compacting %s Region %zu which had used %zu and %s live",
190                 _worker_id, from_region->affiliation_name(),
191                 from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
192 
193   _from_region = from_region;
194   _from_affiliation = from_region->affiliation();
195   if (_from_region->has_live()) {
196     if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) {
197       if (_old_to_region == nullptr) {
198         _old_to_region = from_region;
199         _old_compact_point = from_region->bottom();
200       }
201     } else {
202       assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
203       if (_young_to_region == nullptr) {
204         _young_to_region = from_region;
205         _young_compact_point = from_region->bottom();
206       }
207     }
208   } // else, we won't iterate over this _from_region so we don't need to set up to region to hold copies
209 }
210 
211 void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish() {
212   finish_old_region();
213   finish_young_region();
214 }
215 
216 void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region() {
217   if (_old_to_region != nullptr) {
218     log_debug(gc)("Planned compaction into Old Region %zu, used: %zu tabulated by worker %u",
219             _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
220     _old_to_region->set_new_top(_old_compact_point);
221     _old_to_region = nullptr;
222   }
223 }
224 
225 void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() {
226   if (_young_to_region != nullptr) {
227     log_debug(gc)("Worker %u planned compaction into Young Region %zu, used: %zu",
228             _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
229     _young_to_region->set_new_top(_young_compact_point);
230     _young_to_region = nullptr;
231   }
232 }
233 
234 bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() {
235   return (_from_region == _old_to_region) || (_from_region == _young_to_region);
236 }
237 
238 void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) {
239   assert(_from_region != nullptr, "must set before work");
240   assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
241          "Object must reside in _from_region");
242   assert(_heap->global_generation()->complete_marking_context()->is_marked(p), "must be marked");
243   assert(!_heap->global_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
244 
245   size_t old_size = p->size();
246   size_t new_size = p->copy_size(old_size, p->mark());
247   uint from_region_age = _from_region->age();
248   uint object_age = p->age();
249 
250   bool promote_object = false;
251   if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
252       _heap->age_census()->is_tenurable(from_region_age + object_age)) {
253     if ((_old_to_region != nullptr) && (_old_compact_point + new_size > _old_to_region->end())) {
254       finish_old_region();
255       _old_to_region = nullptr;
256     }
257     if (_old_to_region == nullptr) {
258       if (_empty_regions_pos < _empty_regions.length()) {
259         ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
260         _empty_regions_pos++;
261         new_to_region->set_affiliation(OLD_GENERATION);
262         _old_to_region = new_to_region;
263         _old_compact_point = _old_to_region->bottom();
264         promote_object = true;
265       }
266       // Else this worker thread does not yet have any empty regions into which this aged object can be promoted so
267       // we leave promote_object as false, deferring the promotion.
268     } else {
269       promote_object = true;
270     }
271   }
272 
273   if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
274     assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
275     size_t obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
276     if (_old_compact_point + obj_size > _old_to_region->end()) {
277       ShenandoahHeapRegion* new_to_region;
278 
279       log_debug(gc)("Worker %u finishing old region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
280       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _old_to_region->index(),
281               p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));
282 
283       // Object does not fit.  Get a new _old_to_region.
284       finish_old_region();
285       if (_empty_regions_pos < _empty_regions.length()) {
286         new_to_region = _empty_regions.at(_empty_regions_pos);
287         _empty_regions_pos++;
288         new_to_region->set_affiliation(OLD_GENERATION);
289       } else {
290         // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
291         // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
292         // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
293         new_to_region = _from_region;
294       }
295 
296       assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
297       assert(new_to_region != nullptr, "must not be nullptr");
298       _old_to_region = new_to_region;
299       _old_compact_point = _old_to_region->bottom();
300       obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
301     }
302 
303     // Object fits into current region, record new location, if object does not move:
304     assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
305     shenandoah_assert_not_forwarded(nullptr, p);
306     if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
307       _preserved_marks->push_if_necessary(p, p->mark());
308       FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
309     }
310     _old_compact_point += obj_size;
311   } else {
312     assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
313            "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
314     assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");
315 
316     // After full gc compaction, all regions have age 0.  Embed the region's age into the object's age in order to preserve
317     // tenuring progress.
318     if (_heap->is_aging_cycle()) {
319       ShenandoahHeap::increase_object_age(p, from_region_age + 1);
320     } else {
321       ShenandoahHeap::increase_object_age(p, from_region_age);
322     }
323 
324     size_t obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
325     if (_young_compact_point + obj_size > _young_to_region->end()) {
326       ShenandoahHeapRegion* new_to_region;
327 
328       log_debug(gc)("Worker %u finishing young region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
329       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _young_to_region->index(),
330               p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));
331 
332       // Object does not fit.  Get a new _young_to_region.
333       finish_young_region();
334       if (_empty_regions_pos < _empty_regions.length()) {
335         new_to_region = _empty_regions.at(_empty_regions_pos);
336         _empty_regions_pos++;
337         new_to_region->set_affiliation(YOUNG_GENERATION);
338       } else {
339         // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
340         // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
341         // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
342         new_to_region = _from_region;
343       }
344 
345       assert(new_to_region != _young_to_region, "must not reuse same OLD to-region");
346       assert(new_to_region != nullptr, "must not be nullptr");
347       _young_to_region = new_to_region;
348       obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
349       _young_compact_point = _young_to_region->bottom();
350     }
351 
352     // Object fits into current region, record new location, if object does not move:
353     assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
354     shenandoah_assert_not_forwarded(nullptr, p);
355 
356     if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
357       _preserved_marks->push_if_necessary(p, p->mark());
358       FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
359     }
360     _young_compact_point += obj_size;
361   }
362 }