/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialBlockOffsetTable.inline.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"

bool TenuredGeneration::grow_by(size_t bytes) {
  assert_correct_size_change_locking();
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(space()->bottom(), new_word_size);
    // Expand card table
    SerialHeap::heap()->rem_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(space()->end(), (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    space()->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                        name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
  return result;
}

bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
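    // Illustrative case (hypothetical 4K pages): a request of SIZE_MAX - 1
    // rounds up past SIZE_MAX and wraps to 0, whereas rounding down yields
    // the largest page-aligned value below the request, which is the most
    // this generation could meaningfully commit anyway.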
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
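  // Expansion policy: if the hint 'expand_bytes' asks for more than the
  // request, try that first; if that fails (or the hint was not larger),
  // grow by the requested amount, and as a last resort commit whatever
  // remains of the reserved space.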
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (success && GCLocker::is_active_and_needs_gc()) {
    log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

bool TenuredGeneration::grow_to_reserved() {
  assert_correct_size_change_locking();
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
  }
  return success;
}

void TenuredGeneration::shrink(size_t bytes) {
  assert_correct_size_change_locking();

  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size == 0) {
    return;
  }

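  // Note: shrinking runs in roughly the reverse order of grow_by(): first
  // uncommit memory, then pull back the space's end, then resize the BOT
  // and the card table to match the smaller committed region.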
  // Shrink committed space
  _virtual_space.shrink_by(size);
  // Shrink space; this also shrinks the space's BOT
  space()->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(space()->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(space()->bottom(), new_word_size);
  // Shrink the card table
  SerialHeap::heap()->rem_set()->resize_covered_region(mr);

  size_t new_mem_size = _virtual_space.committed_size();
  size_t old_mem_size = new_mem_size + size;
  log_trace(gc, heap)("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, new_mem_size/K);
}

void TenuredGeneration::compute_new_size_inner() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  if (ShrinkHeapInSteps) {
    // Always reset '_shrink_factor' if the heap is shrunk in steps.
    // If we shrink the heap in this iteration, '_shrink_factor' will
    // be recomputed based on the old value further down in this function.
    _shrink_factor = 0;
  }

  // We don't have floating point command-line arguments
  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
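  // Illustrative numbers: with MinHeapFreeRatio=40 the maximum used
  // percentage is 0.60, so 60M used after GC gives a minimum desired
  // capacity of 60M / 0.60 = 100M, just enough to keep 40% free.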
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity, OldSize);
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  const size_t free_after_gc = free();
  const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
  log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
  log_trace(gc, heap)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                      minimum_free_percentage,
                      maximum_used_percentage);
  log_trace(gc, heap)("     free_after_gc   : %6.1fK   used_after_gc   : %6.1fK   capacity_after_gc   : %6.1fK",
                      free_after_gc / (double) K,
                      used_after_gc / (double) K,
                      capacity_after_gc / (double) K);
  log_trace(gc, heap)("     free_percentage: %6.2f", free_percentage);

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    log_trace(gc, heap)("    expanding:  minimum_desired_capacity: %6.1fK  expand_bytes: %6.1fK  _min_heap_delta_bytes: %6.1fK",
                        minimum_desired_capacity / (double) K,
                        expand_bytes / (double) K,
                        _min_heap_delta_bytes / (double) K);
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity, OldSize);
    log_trace(gc, heap)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                        maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, heap)("    _capacity_at_prologue: %6.1fK  minimum_desired_capacity: %6.1fK  maximum_desired_capacity: %6.1fK",
                        _capacity_at_prologue / (double) K,
                        minimum_desired_capacity / (double) K,
                        maximum_desired_capacity / (double) K);
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      if (ShrinkHeapInSteps) {
        // If ShrinkHeapInSteps is true (the default),
        // we don't want to shrink all the way back to initSize if people call
        // System.gc(), because some programs do that between "phases" and then
        // we'd just have to grow the heap up again for the next phase.  So we
        // damp the shrinking: 0% on the first call, 10% on the second call, 40%
        // on the third call, and 100% by the fourth call.  But if we recompute
        // size without shrinking, it goes back to 0%.
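        // Hypothetical walk-through: with 80M of excess capacity, successive
        // shrinking GCs release 0M, then 10% (8M), then 40% and finally 100%
        // of whatever excess remains at each step, so a transient capacity
        // spike must persist across several GCs before it is fully returned.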
        shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
        if (current_shrink_factor == 0) {
          _shrink_factor = 10;
        } else {
          _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
        }
      }
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      log_trace(gc, heap)("    shrinking:  initSize: %.1fK  maximum_desired_capacity: %.1fK",
                          OldSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, heap)("    shrink_bytes: %.1fK  current_shrink_factor: " SIZE_FORMAT "  new shrink factor: " SIZE_FORMAT "  _min_heap_delta_bytes: %.1fK",
                          shrink_bytes / (double) K,
                          current_shrink_factor,
                          _shrink_factor,
                          _min_heap_delta_bytes / (double) K);
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    log_trace(gc, heap)("    aggressive shrinking:  _capacity_at_prologue: %.1fK  capacity_after_gc: %.1fK  expansion_for_promotion: %.1fK  shrink_bytes: %.1fK",
                        _capacity_at_prologue / (double) K,
                        capacity_after_gc / (double) K,
                        expansion_for_promotion / (double) K,
                        shrink_bytes / (double) K);
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}

HeapWord* TenuredGeneration::block_start(const void* addr) const {
  HeapWord* cur_block = _bts->block_start_reaching_into_card(addr);

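  // The BOT lookup returns the start of the block that reaches into the
  // card containing 'addr'; walk forward object by object until we reach
  // the block whose extent actually covers 'addr'.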
  while (true) {
    HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
    if (next_block > addr) {
      assert(cur_block <= addr, "postcondition");
      return cur_block;
    }
    cur_block = next_block;
    // Because the BOT is precise, we should never step into the next card
    // (i.e. crossing the card boundary).
    assert(!SerialBlockOffsetTable::is_crossing_card_boundary(cur_block, (HeapWord*)addr), "must be");
  }
}

void TenuredGeneration::scan_old_to_young_refs(HeapWord* saved_top_in_old_gen) {
  _rs->scan_old_to_young_refs(this, saved_top_in_old_gen);
}

TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     size_t min_byte_size,
                                     size_t max_byte_size,
                                     CardTableRS* remset) :
  Generation(rs, initial_byte_size), _rs(remset),
  _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  // If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
  _shrink_factor = ShrinkHeapInSteps ? 0 : 100;
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new SerialBlockOffsetTable(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);

  // Verify that the start and end of this generation are the start of a card.
  // If this weren't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(CardTable::is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  guarantee(CardTable::is_card_aligned(reserved_mr.end()), "generation must be card aligned");
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new ContiguousSpace();
  _the_space->initialize(MemRegion(bottom, end), SpaceDecorator::Clear, SpaceDecorator::Mangle);

  _avg_promoted = new AdaptivePaddedNoZeroDevAverage(AdaptiveSizePolicyWeight, PromotedPadding);

  // initialize performance counters

  const char* gen_name = "old";
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      min_byte_size, max_byte_size, &_virtual_space);

  _gc_counters = new CollectorCounters("Serial full collection pauses", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void TenuredGeneration::gc_prologue() {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  compute_new_size_inner();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
}

void TenuredGeneration::update_promote_stats() {
  size_t used_after_gc = used();
  size_t promoted_in_bytes;
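  // Estimate the promoted volume as the growth in old-gen usage since the
  // GC prologue; usage can also end up below the prologue value, in which
  // case we record zero rather than a wrapped negative delta.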
  if (used_after_gc > _used_at_prologue) {
    promoted_in_bytes = used_after_gc - _used_at_prologue;
  } else {
    promoted_in_bytes = 0;
  }
  _avg_promoted->sample(promoted_in_bytes);
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = _the_space->free() + _virtual_space.uncommitted_size();

  size_t avg_promoted  = (size_t)_avg_promoted->padded_average();
  size_t promotion_estimate = MIN2(avg_promoted, max_promotion_in_bytes);

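  // The attempt is deemed safe when the estimate (the padded average of past
  // promotions, capped by the most the caller could possibly promote) fits
  // into the free space plus whatever is still uncommitted.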
  bool res = (promotion_estimate <= available);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
    res? "":" not", available, res? ">=":"<", avg_promoted, max_promotion_in_bytes);

  return res;
}

oop TenuredGeneration::allocate_for_promotion(oop obj, size_t obj_size) {
  assert(obj_size == obj->size() || UseCompactObjectHeaders, "bad obj_size passed in");

#ifndef PRODUCT
  if (SerialHeap::heap()->promotion_should_fail()) {
    return nullptr;
  }
#endif  // #ifndef PRODUCT

  // Allocate new object.
  HeapWord* result = allocate(obj_size);
  if (result == nullptr) {
    // Promotion of obj into gen failed.  Try to expand and allocate.
    result = expand_and_allocate(obj_size);
  }

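  // 'result' may still be null here; cast_to_oop simply reinterprets the
  // pointer, so a failed promotion surfaces to the caller as a null oop.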
  return cast_to_oop<HeapWord*>(result);
}

HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize, _min_heap_delta_bytes);
  return allocate(word_size);
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(Heap_lock);
}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) {
  // Create the BOT for the archive space.
  HeapWord* start = archive_space.start();
  while (start < archive_space.end()) {
    size_t word_size = cast_to_oop(start)->size();
    _bts->update_for_block(start, start + word_size);
    start += word_size;
  }
}

void TenuredGeneration::gc_epilogue() {
  // update the generation and space performance counters
  update_counters();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  st->print(" %-10s", name());

  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used()/K);
  st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(_virtual_space.low_boundary()),
               p2i(_virtual_space.high()),
               p2i(_virtual_space.high_boundary()));

  st->print("   the");
  _the_space->print_on(st);
}