/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialBlockOffsetTable.inline.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"

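// Commit 'bytes' more of the reserved space. On success, the card table and
// the shared block offset table are resized to cover the newly committed
// region before the space's end is moved up. Returns false if the underlying
// virtual space could not be expanded.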
bool TenuredGeneration::grow_by(size_t bytes) {
  assert_correct_size_change_locking();
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(space()->bottom(), new_word_size);
    // Expand card table
    SerialHeap::heap()->rem_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(space()->end(), (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    space()->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_trace(gc, heap)("Expanding %s from %zuK by %zuK to %zuK",
                        name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
  return result;
}

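// Grow the generation by a best-effort amount: try 'expand_bytes' first if it
// is the larger request, then fall back to 'bytes', and finally to whatever is
// left of the reserved space. Both sizes are rounded to the VM page size.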
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes = os::align_up_vm_page_size(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = os::align_down_vm_page_size(bytes);
  }
  size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (success && GCLocker::is_active_and_needs_gc()) {
    log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

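// Commit whatever is left of the reserved space. Returns true if the whole
// reservation is committed afterwards (trivially true if nothing was left to
// commit).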
bool TenuredGeneration::grow_to_reserved() {
  assert_correct_size_change_locking();
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
  }
  return success;
}

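// Uncommit 'bytes' (rounded down to the VM page size) from the high end of the
// generation and shrink the space, the shared block offset table and the card
// table to match the new committed size.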
void TenuredGeneration::shrink(size_t bytes) {
  assert_correct_size_change_locking();

  size_t size = os::align_down_vm_page_size(bytes);
  if (size == 0) {
    return;
  }

  // Shrink committed space
  _virtual_space.shrink_by(size);
  // Shrink space; this also shrinks the space's BOT
  space()->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(space()->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(space()->bottom(), new_word_size);
  // Shrink the card table
  SerialHeap::heap()->rem_set()->resize_covered_region(mr);

  size_t new_mem_size = _virtual_space.committed_size();
  size_t old_mem_size = new_mem_size + size;
  log_trace(gc, heap)("Shrinking %s from %zuK to %zuK",
                      name(), old_mem_size/K, new_mem_size/K);
}

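// Resize the generation after a collection so that the amount of free space
// stays between MinHeapFreeRatio and MaxHeapFreeRatio of capacity, without
// dropping below OldSize. Expansion and shrinking happen only when the change
// is at least _min_heap_delta_bytes; with ShrinkHeapInSteps the shrink amount
// is additionally damped by _shrink_factor so that repeated System.gc() calls
// do not bounce the heap size.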
void TenuredGeneration::compute_new_size_inner() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  if (ShrinkHeapInSteps) {
    // Always reset '_shrink_factor' if the heap is shrunk in steps.
    // If we shrink the heap in this iteration, '_shrink_factor' will
    // be recomputed based on the old value further down in this function.
    _shrink_factor = 0;
  }

  // We don't have floating point command-line arguments
  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity, OldSize);
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  const size_t free_after_gc = free();
  const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
  log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
  log_trace(gc, heap)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                      minimum_free_percentage,
                      maximum_used_percentage);
  log_trace(gc, heap)("     free_after_gc   : %6.1fK   used_after_gc   : %6.1fK   capacity_after_gc   : %6.1fK",
                      free_after_gc / (double) K,
                      used_after_gc / (double) K,
                      capacity_after_gc / (double) K);
  log_trace(gc, heap)("     free_percentage: %6.2f", free_percentage);

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    log_trace(gc, heap)("    expanding:  minimum_desired_capacity: %6.1fK  expand_bytes: %6.1fK  _min_heap_delta_bytes: %6.1fK",
                        minimum_desired_capacity / (double) K,
                        expand_bytes / (double) K,
                        _min_heap_delta_bytes / (double) K);
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity, OldSize);
    log_trace(gc, heap)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                        maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, heap)("    _capacity_at_prologue: %6.1fK  minimum_desired_capacity: %6.1fK  maximum_desired_capacity: %6.1fK",
                        _capacity_at_prologue / (double) K,
                        minimum_desired_capacity / (double) K,
                        maximum_desired_capacity / (double) K);
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      if (ShrinkHeapInSteps) {
        // If ShrinkHeapInSteps is true (the default),
        // we don't want to shrink all the way back to initSize if people call
        // System.gc(), because some programs do that between "phases" and then
        // we'd just have to grow the heap up again for the next phase.  So we
        // damp the shrinking: 0% on the first call, 10% on the second call, 40%
        // on the third call, and 100% by the fourth call.  But if we recompute
        // size without shrinking, it goes back to 0%.
        shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
        if (current_shrink_factor == 0) {
          _shrink_factor = 10;
        } else {
          _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
        }
      }
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      log_trace(gc, heap)("    shrinking:  initSize: %.1fK  maximum_desired_capacity: %.1fK",
                          OldSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, heap)("    shrink_bytes: %.1fK  current_shrink_factor: %zu  new shrink factor: %zu  _min_heap_delta_bytes: %.1fK",
                          shrink_bytes / (double) K,
                          current_shrink_factor,
                          _shrink_factor,
                          _min_heap_delta_bytes / (double) K);
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    log_trace(gc, heap)("    aggressive shrinking:  _capacity_at_prologue: %.1fK  capacity_after_gc: %.1fK  expansion_for_promotion: %.1fK  shrink_bytes: %.1fK",
                        _capacity_at_prologue / (double) K,
                        capacity_after_gc / (double) K,
                        expansion_for_promotion / (double) K,
                        shrink_bytes / (double) K);
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}

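// Return the start of the object containing 'addr'. The block offset table
// yields the start of a block known to reach into 'addr's card; from there we
// walk forward object by object until we find the one covering 'addr'.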
HeapWord* TenuredGeneration::block_start(const void* addr) const {
  HeapWord* cur_block = _bts->block_start_reaching_into_card(addr);

  while (true) {
    HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
    if (next_block > addr) {
      assert(cur_block <= addr, "postcondition");
      return cur_block;
    }
    cur_block = next_block;
    // Because the BOT is precise, we should never step into the next card
    // (i.e. crossing the card boundary).
    assert(!SerialBlockOffsetTable::is_crossing_card_boundary(cur_block, (HeapWord*)addr), "must be");
  }
}

void TenuredGeneration::scan_old_to_young_refs(HeapWord* saved_top_in_old_gen) {
  _rs->scan_old_to_young_refs(this, saved_top_in_old_gen);
}

TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     size_t min_byte_size,
                                     size_t max_byte_size,
                                     CardTableRS* remset) :
  Generation(rs, initial_byte_size), _rs(remset),
  _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  // If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
  _shrink_factor = ShrinkHeapInSteps ? 0 : 100;
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new SerialBlockOffsetTable(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);

  // Verify that the start and end of this generation are the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(CardTable::is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  guarantee(CardTable::is_card_aligned(reserved_mr.end()), "generation must be card aligned");
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new ContiguousSpace();
  _the_space->initialize(MemRegion(bottom, end), SpaceDecorator::Clear, SpaceDecorator::Mangle);
  _capacity_at_prologue = 0;

  _avg_promoted = new AdaptivePaddedNoZeroDevAverage(AdaptiveSizePolicyWeight, PromotedPadding);

  // initialize performance counters

  const char* gen_name = "old";
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      min_byte_size, max_byte_size, &_virtual_space);

  _gc_counters = new CollectorCounters("Serial full collection pauses", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

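// Record capacity and usage at the start of a collection so that
// compute_new_size_inner() can take back expansion done purely for promotions
// and update_promote_stats() can measure how much was promoted.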
void TenuredGeneration::gc_prologue() {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  compute_new_size_inner();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: %zu used_after_gc: %zu"
         " capacity: %zu", used(), used_after_gc, capacity());
}

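// Sample how much the generation grew during the last collection (i.e. the
// bytes promoted into it) into the padded promotion average consulted by
// promotion_attempt_is_safe().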
void TenuredGeneration::update_promote_stats() {
  size_t used_after_gc = used();
  size_t promoted_in_bytes;
  if (used_after_gc > _used_at_prologue) {
    promoted_in_bytes = used_after_gc - _used_at_prologue;
  } else {
    promoted_in_bytes = 0;
  }
  _avg_promoted->sample(promoted_in_bytes);
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

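// A promotion attempt is considered safe if the free space in the old
// generation, plus what can still be committed from its reservation, covers
// the smaller of the padded average promotion size and the caller-supplied
// worst case 'max_promotion_in_bytes'.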
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = _the_space->free() + _virtual_space.uncommitted_size();

  size_t avg_promoted  = (size_t)_avg_promoted->padded_average();
  size_t promotion_estimate = MIN2(avg_promoted, max_promotion_in_bytes);

  bool res = (promotion_estimate <= available);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(%zu) %s av_promo(%zu), max_promo(%zu)",
                res ? "" : " not", available, res ? ">=" : "<", avg_promoted, max_promotion_in_bytes);

  return res;
}

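// Allocate 'obj_size' words for an object being promoted from the young
// generation, expanding the old generation if the first allocation attempt
// fails. Returns null on promotion failure.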
oop TenuredGeneration::allocate_for_promotion(oop obj, size_t obj_size) {
  assert(obj_size == obj->size() || UseCompactObjectHeaders, "bad obj_size passed in");

#ifndef PRODUCT
  if (SerialHeap::heap()->promotion_should_fail()) {
    return nullptr;
  }
#endif  // #ifndef PRODUCT

  // Allocate new object.
  HeapWord* result = allocate(obj_size);
  if (result == nullptr) {
    // Promotion of obj into gen failed.  Try to expand and allocate.
    result = expand_and_allocate(obj_size);
  }

  return cast_to_oop<HeapWord*>(result);
}

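// Grow the generation (preferring the larger of the request and
// _min_heap_delta_bytes, best effort) and retry the allocation.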
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize, _min_heap_delta_bytes);
  return allocate(word_size);
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(Heap_lock);
}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

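// Build block offset table entries for the objects in a loaded archive region
// by walking the region object by object.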
void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) {
  // Create the BOT for the archive space.
  HeapWord* start = archive_space.start();
  while (start < archive_space.end()) {
    size_t word_size = cast_to_oop(start)->size();
    _bts->update_for_block(start, start + word_size);
    start += word_size;
  }
}

void TenuredGeneration::gc_epilogue() {
  // update the generation and space performance counters
  update_counters();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  st->print(" %-10s", name());

  st->print(" total %zuK, used %zuK",
            capacity()/K, used()/K);
  st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(_virtual_space.low_boundary()),
               p2i(_virtual_space.high()),
               p2i(_virtual_space.high_boundary()));

  st->print("   the");
  _the_space->print_on(st);
}