/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialBlockOffsetTable.inline.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"

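// Expand the committed region of this generation by "bytes". On success, the
// card table and the shared block offset table are resized to cover the new
// committed region and the space's end is moved up; returns false if the
// underlying virtual space cannot be expanded.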
bool TenuredGeneration::grow_by(size_t bytes) {
  assert_correct_size_change_locking();
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
      heap_word_size(_virtual_space.committed_size());
    MemRegion mr(space()->bottom(), new_word_size);
    // Expand card table
    SerialHeap::heap()->rem_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(space()->end(), (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    space()->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_trace(gc, heap)("Expanding %s from %zuK by %zuK to %zuK",
                        name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
  return result;
}

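// Best-effort expansion: try to grow by the larger of "bytes" and
// "expand_bytes" (both rounded up to the VM page size), falling back first to
// the smaller request and finally to whatever is still uncommitted. Returns
// true if any attempt succeeds (or if "bytes" is zero).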
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes = os::align_up_vm_page_size(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = os::align_down_vm_page_size(bytes);
  }
  size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }

  return success;
}

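// Commit whatever part of the reserved region is still uncommitted. Returns
// true if nothing was left to commit or the expansion succeeded.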
bool TenuredGeneration::grow_to_reserved() {
  assert_correct_size_change_locking();
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
  }
  return success;
}

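// Uncommit "bytes" (rounded down to the VM page size) from the high end of
// this generation, shrinking the space, the shared block offset table and the
// card table accordingly.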
void TenuredGeneration::shrink(size_t bytes) {
  assert_correct_size_change_locking();

  size_t size = os::align_down_vm_page_size(bytes);
  if (size == 0) {
    return;
  }

  // Shrink committed space
  _virtual_space.shrink_by(size);
  // Shrink space; this also shrinks the space's BOT
  space()->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(space()->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(space()->bottom(), new_word_size);
  // Shrink the card table
  SerialHeap::heap()->rem_set()->resize_covered_region(mr);

  size_t new_mem_size = _virtual_space.committed_size();
  size_t old_mem_size = new_mem_size + size;
  log_trace(gc, heap)("Shrinking %s from %zuK to %zuK",
                      name(), old_mem_size/K, new_mem_size/K);
}

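// Resize the generation after a full collection so that the amount of free
// space stays between MinHeapFreeRatio and MaxHeapFreeRatio of capacity:
// expand if there is too little free space, shrink (possibly damped by
// '_shrink_factor', see below) if there is too much.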
void TenuredGeneration::compute_new_size_inner() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  if (ShrinkHeapInSteps) {
    // Always reset '_shrink_factor' if the heap is shrunk in steps.
    // If we shrink the heap in this iteration, '_shrink_factor' will
    // be recomputed based on the old value further down in this function.
    _shrink_factor = 0;
  }

  // We don't have floating point command-line arguments
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

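  // The smallest capacity that still leaves at least MinHeapFreeRatio percent
  // free:
  //   minimum_desired_capacity = used_after_gc / (1 - MinHeapFreeRatio / 100)
  // e.g. used_after_gc = 60M and MinHeapFreeRatio = 40 give 60M / 0.6 = 100M.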
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity, OldSize);
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  const size_t free_after_gc = free();
  const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
  log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
  log_trace(gc, heap)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                      minimum_free_percentage,
                      maximum_used_percentage);
  log_trace(gc, heap)(" free_after_gc : %6.1fK used_after_gc : %6.1fK capacity_after_gc : %6.1fK",
                      free_after_gc / (double) K,
                      used_after_gc / (double) K,
                      capacity_after_gc / (double) K);
  log_trace(gc, heap)(" free_percentage: %6.2f", free_percentage);

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0);  // safe if expansion fails
    }
    log_trace(gc, heap)(" expanding: minimum_desired_capacity: %6.1fK expand_bytes: %6.1fK _min_heap_delta_bytes: %6.1fK",
                        minimum_desired_capacity / (double) K,
                        expand_bytes / (double) K,
                        _min_heap_delta_bytes / (double) K);
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
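    // The largest capacity that still keeps no more than MaxHeapFreeRatio
    // percent free:
    //   maximum_desired_capacity = used_after_gc / (1 - MaxHeapFreeRatio / 100)
    // e.g. used_after_gc = 60M and MaxHeapFreeRatio = 70 give 60M / 0.3 = 200M.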
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity, OldSize);
    log_trace(gc, heap)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                        maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, heap)(" _capacity_at_prologue: %6.1fK minimum_desired_capacity: %6.1fK maximum_desired_capacity: %6.1fK",
                        _capacity_at_prologue / (double) K,
                        minimum_desired_capacity / (double) K,
                        maximum_desired_capacity / (double) K);
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      if (ShrinkHeapInSteps) {
        // If ShrinkHeapInSteps is true (the default),
        // we don't want to shrink all the way back to initSize if people call
        // System.gc(), because some programs do that between "phases" and then
        // we'd just have to grow the heap up again for the next phase. So we
        // damp the shrinking: 0% on the first call, 10% on the second call, 40%
        // on the third call, and 100% by the fourth call. But if we recompute
        // size without shrinking, it goes back to 0%.
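        // Concretely: '_shrink_factor' starts at 0 and, on consecutive
        // shrinking collections, advances 0 -> 10 -> MIN2(10 * 4, 100) = 40 ->
        // MIN2(40 * 4, 100) = 100, so only the fourth consecutive request
        // shrinks by the full excess.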
        shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
        if (current_shrink_factor == 0) {
          _shrink_factor = 10;
        } else {
          _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
        }
      }
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      log_trace(gc, heap)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK",
                          OldSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, heap)(" shrink_bytes: %.1fK current_shrink_factor: %zu new shrink factor: %zu _min_heap_delta_bytes: %.1fK",
                          shrink_bytes / (double) K,
                          current_shrink_factor,
                          _shrink_factor,
                          _min_heap_delta_bytes / (double) K);
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC. That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    log_trace(gc, heap)(" aggressive shrinking: _capacity_at_prologue: %.1fK capacity_after_gc: %.1fK expansion_for_promotion: %.1fK shrink_bytes: %.1fK",
                        _capacity_at_prologue / (double) K,
                        capacity_after_gc / (double) K,
                        expansion_for_promotion / (double) K,
                        shrink_bytes / (double) K);
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}

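// Return the start of the block (object) that contains "addr": start from the
// block that the offset table knows reaches into addr's card and walk forward
// object by object until the enclosing block is found.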
HeapWord* TenuredGeneration::block_start(const void* addr) const {
  HeapWord* cur_block = _bts->block_start_reaching_into_card(addr);

  while (true) {
    HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
    if (next_block > addr) {
      assert(cur_block <= addr, "postcondition");
      return cur_block;
    }
    cur_block = next_block;
    // Because the BOT is precise, we should never step into the next card
    // (i.e. crossing the card boundary).
    assert(!SerialBlockOffsetTable::is_crossing_card_boundary(cur_block, (HeapWord*)addr), "must be");
  }
}

void TenuredGeneration::scan_old_to_young_refs(HeapWord* saved_top_in_old_gen) {
  _rs->scan_old_to_young_refs(this, saved_top_in_old_gen);
}

TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     size_t min_byte_size,
                                     size_t max_byte_size,
                                     CardTableRS* remset) :
  Generation(rs, initial_byte_size), _rs(remset),
  _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  // If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
  _shrink_factor = ShrinkHeapInSteps ? 0 : 100;
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new SerialBlockOffsetTable(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);

  // Verify that the start and end of this generation are the start of a card.
  // If this weren't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(CardTable::is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  guarantee(CardTable::is_card_aligned(reserved_mr.end()), "generation must be card aligned");
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = 0;
  _used_at_prologue = 0;
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end = (HeapWord*) _virtual_space.high();
  _the_space = new ContiguousSpace();
  _the_space->initialize(MemRegion(bottom, end), SpaceDecorator::Clear, SpaceDecorator::Mangle);

  _avg_promoted = new AdaptivePaddedNoZeroDevAverage(AdaptiveSizePolicyWeight, PromotedPadding);

  // initialize performance counters

  const char* gen_name = "old";
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      min_byte_size, max_byte_size, _virtual_space.committed_size());

  _gc_counters = new CollectorCounters("Serial full collection pauses", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void TenuredGeneration::gc_prologue() {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  compute_new_size_inner();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: %zu used_after_gc: %zu"
         " capacity: %zu", used(), used_after_gc, capacity());
}

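// Sample the promotion average with the number of bytes promoted into this
// generation since gc_prologue(), i.e. the growth in used().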
void TenuredGeneration::update_promote_stats() {
  size_t used_after_gc = used();
  size_t promoted_in_bytes;
  if (used_after_gc > _used_at_prologue) {
    promoted_in_bytes = used_after_gc - _used_at_prologue;
  } else {
    promoted_in_bytes = 0;
  }
  _avg_promoted->sample(promoted_in_bytes);
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_capacity(_virtual_space.committed_size());
  }
}

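// Estimate whether a promotion of at most "max_promotion_in_bytes" is likely
// to succeed: the estimate is the smaller of the padded average promotion and
// the worst case, and it must fit in the free plus still uncommitted part of
// this generation.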
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = _the_space->free() + _virtual_space.uncommitted_size();

  size_t avg_promoted = (size_t)_avg_promoted->padded_average();
  size_t promotion_estimate = MIN2(avg_promoted, max_promotion_in_bytes);

  bool res = (promotion_estimate <= available);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(%zu) %s av_promo(%zu), max_promo(%zu)",
                res ? "" : " not", available, res ? ">=" : "<", avg_promoted, max_promotion_in_bytes);

  return res;
}

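// Allocate space for "obj" being promoted from the young generation,
// expanding this generation if the first attempt fails. Returns null if no
// space can be made available.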
oop TenuredGeneration::allocate_for_promotion(oop obj, size_t obj_size) {
  assert(obj_size == obj->size() || UseCompactObjectHeaders, "bad obj_size passed in");

#ifndef PRODUCT
  if (SerialHeap::heap()->promotion_should_fail()) {
    return nullptr;
  }
#endif  // #ifndef PRODUCT

  // Allocate new object.
  HeapWord* result = allocate(obj_size);
  if (result == nullptr) {
    // Promotion of obj into gen failed. Try to expand and allocate.
    result = expand_and_allocate(obj_size);
  }

  return cast_to_oop<HeapWord*>(result);
}

HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize, _min_heap_delta_bytes);
  return allocate(word_size);
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(Heap_lock);
}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) {
  // Create the BOT for the archive space.
  HeapWord* start = archive_space.start();
  while (start < archive_space.end()) {
    size_t word_size = cast_to_oop(start)->size();
    _bts->update_for_block(start, start + word_size);
    start += word_size;
  }
}


void TenuredGeneration::gc_epilogue() {
  // update the generation and space performance counters
  update_counters();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  st->print("%-10s", name());

  st->print(" total %zuK, used %zuK ",
            capacity()/K, used()/K);
  _virtual_space.print_space_boundaries_on(st);

  StreamIndentor si(st, 1);
  _the_space->print_on(st, "the ");
}