1 /*
2 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/serial/cardTableRS.hpp"
26 #include "gc/serial/serialBlockOffsetTable.inline.hpp"
27 #include "gc/serial/serialFullGC.hpp"
28 #include "gc/serial/serialHeap.hpp"
29 #include "gc/serial/tenuredGeneration.inline.hpp"
30 #include "gc/shared/collectorCounters.hpp"
31 #include "gc/shared/gcLocker.hpp"
32 #include "gc/shared/gcTimer.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/genArguments.hpp"
35 #include "gc/shared/hSpaceCounters.hpp"
36 #include "gc/shared/space.hpp"
37 #include "gc/shared/spaceDecorator.hpp"
38 #include "logging/log.hpp"
39 #include "memory/allocation.inline.hpp"
40 #include "oops/oop.inline.hpp"
41 #include "runtime/java.hpp"
42 #include "utilities/copy.hpp"
43 #include "utilities/macros.hpp"
44
// Grow the committed size of this generation by "bytes"; returns true on
// success. On success the covering data structures are resized to match:
// first the card table, then the shared block offset table, and only then
// the space's end — this order matters, since the space must never extend
// beyond what the card table and BOT cover.
bool TenuredGeneration::grow_by(size_t bytes) {
  assert_correct_size_change_locking();
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(space()->bottom(), new_word_size);
    // Expand card table
    SerialHeap::heap()->rem_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      // Mangle the newly committed (still unused) tail so stale data is
      // recognizable during verification.
      MemRegion mangle_region(space()->end(), (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    space()->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_trace(gc, heap)("Expanding %s from %zuK by %zuK to %zuK",
                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
  return result;
}
77
78 bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
79 assert_locked_or_safepoint(Heap_lock);
80 if (bytes == 0) {
81 return true; // That's what grow_by(0) would return
82 }
83 size_t aligned_bytes = os::align_up_vm_page_size(bytes);
84 if (aligned_bytes == 0){
85 // The alignment caused the number of bytes to wrap. An expand_by(0) will
86 // return true with the implication that an expansion was done when it
87 // was not. A call to expand implies a best effort to expand by "bytes"
88 // but not a guarantee. Align down to give a best effort. This is likely
89 // the most that the generation can expand since it has some capacity to
90 // start with.
91 aligned_bytes = os::align_down_vm_page_size(bytes);
92 }
93 size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
94 bool success = false;
95 if (aligned_expand_bytes > aligned_bytes) {
96 success = grow_by(aligned_expand_bytes);
97 }
98 if (!success) {
99 success = grow_by(aligned_bytes);
100 }
101 if (!success) {
102 success = grow_to_reserved();
103 }
104
105 return success;
106 }
107
108 bool TenuredGeneration::grow_to_reserved() {
109 assert_correct_size_change_locking();
110 bool success = true;
111 const size_t remaining_bytes = _virtual_space.uncommitted_size();
112 if (remaining_bytes > 0) {
113 success = grow_by(remaining_bytes);
114 DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
115 }
116 return success;
117 }
118
// Shrink the committed size of this generation by "bytes" (rounded down to
// the VM page size; a request smaller than a page is ignored). The covering
// data structures are resized in the reverse order of grow_by(): first the
// committed memory and the space's end, then the block offset table, and
// finally the card table — the space must shrink before its covers do.
void TenuredGeneration::shrink(size_t bytes) {
  assert_correct_size_change_locking();

  size_t size = os::align_down_vm_page_size(bytes);
  if (size == 0) {
    return;
  }

  // Shrink committed space
  _virtual_space.shrink_by(size);
  // Shrink space; this also shrinks the space's BOT
  space()->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(space()->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(space()->bottom(), new_word_size);
  // Shrink the card table
  SerialHeap::heap()->rem_set()->resize_covered_region(mr);

  size_t new_mem_size = _virtual_space.committed_size();
  size_t old_mem_size = new_mem_size + size;
  log_trace(gc, heap)("Shrinking %s from %zuK to %zuK",
                      name(), old_mem_size/K, new_mem_size/K);
}
143
144 void TenuredGeneration::compute_new_size_inner() {
145 assert(_shrink_factor <= 100, "invalid shrink factor");
146 size_t current_shrink_factor = _shrink_factor;
147 if (ShrinkHeapInSteps) {
148 // Always reset '_shrink_factor' if the heap is shrunk in steps.
149 // If we shrink the heap in this iteration, '_shrink_factor' will
150 // be recomputed based on the old value further down in this function.
151 _shrink_factor = 0;
152 }
153
154 // We don't have floating point command-line arguments
155 // Note: argument processing ensures that MinHeapFreeRatio < 100.
156 const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
157 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
158
159 // Compute some numbers about the state of the heap.
160 const size_t used_after_gc = used();
161 const size_t capacity_after_gc = capacity();
162
163 const double min_tmp = used_after_gc / maximum_used_percentage;
164 size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
165 // Don't shrink less than the initial generation size
166 minimum_desired_capacity = MAX2(minimum_desired_capacity, OldSize);
167 assert(used_after_gc <= minimum_desired_capacity, "sanity check");
168
169 const size_t free_after_gc = free();
170 const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
171 log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
172 log_trace(gc, heap)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
173 minimum_free_percentage,
174 maximum_used_percentage);
175 log_trace(gc, heap)(" free_after_gc : %6.1fK used_after_gc : %6.1fK capacity_after_gc : %6.1fK",
176 free_after_gc / (double) K,
177 used_after_gc / (double) K,
178 capacity_after_gc / (double) K);
179 log_trace(gc, heap)(" free_percentage: %6.2f", free_percentage);
180
181 if (capacity_after_gc < minimum_desired_capacity) {
182 // If we have less free space than we want then expand
183 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
184 // Don't expand unless it's significant
185 if (expand_bytes >= _min_heap_delta_bytes) {
186 expand(expand_bytes, 0); // safe if expansion fails
187 }
188 log_trace(gc, heap)(" expanding: minimum_desired_capacity: %6.1fK expand_bytes: %6.1fK _min_heap_delta_bytes: %6.1fK",
189 minimum_desired_capacity / (double) K,
190 expand_bytes / (double) K,
191 _min_heap_delta_bytes / (double) K);
192 return;
193 }
194
195 // No expansion, now see if we want to shrink
196 size_t shrink_bytes = 0;
197 // We would never want to shrink more than this
198 size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
199
200 if (MaxHeapFreeRatio < 100) {
201 const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
202 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
203 const double max_tmp = used_after_gc / minimum_used_percentage;
204 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
205 maximum_desired_capacity = MAX2(maximum_desired_capacity, OldSize);
206 log_trace(gc, heap)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
207 maximum_free_percentage, minimum_used_percentage);
208 log_trace(gc, heap)(" _capacity_at_prologue: %6.1fK minimum_desired_capacity: %6.1fK maximum_desired_capacity: %6.1fK",
209 _capacity_at_prologue / (double) K,
210 minimum_desired_capacity / (double) K,
211 maximum_desired_capacity / (double) K);
212 assert(minimum_desired_capacity <= maximum_desired_capacity,
213 "sanity check");
214
215 if (capacity_after_gc > maximum_desired_capacity) {
216 // Capacity too large, compute shrinking size
217 shrink_bytes = capacity_after_gc - maximum_desired_capacity;
218 if (ShrinkHeapInSteps) {
219 // If ShrinkHeapInSteps is true (the default),
220 // we don't want to shrink all the way back to initSize if people call
221 // System.gc(), because some programs do that between "phases" and then
222 // we'd just have to grow the heap up again for the next phase. So we
223 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
224 // on the third call, and 100% by the fourth call. But if we recompute
225 // size without shrinking, it goes back to 0%.
226 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
227 if (current_shrink_factor == 0) {
228 _shrink_factor = 10;
229 } else {
230 _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
231 }
232 }
233 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
234 log_trace(gc, heap)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK",
235 OldSize / (double) K, maximum_desired_capacity / (double) K);
236 log_trace(gc, heap)(" shrink_bytes: %.1fK current_shrink_factor: %zu new shrink factor: %zu _min_heap_delta_bytes: %.1fK",
237 shrink_bytes / (double) K,
238 current_shrink_factor,
239 _shrink_factor,
240 _min_heap_delta_bytes / (double) K);
241 }
242 }
243
244 if (capacity_after_gc > _capacity_at_prologue) {
245 // We might have expanded for promotions, in which case we might want to
246 // take back that expansion if there's room after GC. That keeps us from
247 // stretching the heap with promotions when there's plenty of room.
248 size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
249 expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
250 // We have two shrinking computations, take the largest
251 shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
252 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
253 log_trace(gc, heap)(" aggressive shrinking: _capacity_at_prologue: %.1fK capacity_after_gc: %.1fK expansion_for_promotion: %.1fK shrink_bytes: %.1fK",
254 capacity_after_gc / (double) K,
255 _capacity_at_prologue / (double) K,
256 expansion_for_promotion / (double) K,
257 shrink_bytes / (double) K);
258 }
259 // Don't shrink unless it's significant
260 if (shrink_bytes >= _min_heap_delta_bytes) {
261 shrink(shrink_bytes);
262 }
263 }
264
265 HeapWord* TenuredGeneration::block_start(const void* addr) const {
266 HeapWord* cur_block = _bts->block_start_reaching_into_card(addr);
267
268 while (true) {
269 HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
270 if (next_block > addr) {
271 assert(cur_block <= addr, "postcondition");
272 return cur_block;
273 }
274 cur_block = next_block;
275 // Because the BOT is precise, we should never step into the next card
276 // (i.e. crossing the card boundary).
277 assert(!SerialBlockOffsetTable::is_crossing_card_boundary(cur_block, (HeapWord*)addr), "must be");
278 }
279 }
280
// Have the remembered set scan this generation's dirty cards for references
// into the young generation; only the used part (up to top()) is examined.
void TenuredGeneration::scan_old_to_young_refs() {
  _rs->scan_old_to_young_refs(this, space()->top());
}
284
285 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
286 size_t initial_byte_size,
287 size_t min_byte_size,
288 size_t max_byte_size,
289 CardTableRS* remset) :
290 Generation(rs, initial_byte_size), _rs(remset),
291 _min_heap_delta_bytes(), _capacity_at_prologue(),
292 _used_at_prologue()
293 {
294 // If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
295 _shrink_factor = ShrinkHeapInSteps ? 0 : 100;
296 HeapWord* start = (HeapWord*)rs.base();
297 size_t reserved_byte_size = rs.size();
298 assert((uintptr_t(start) & 3) == 0, "bad alignment");
299 assert((reserved_byte_size & 3) == 0, "bad alignment");
300 MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
301 _bts = new SerialBlockOffsetTable(reserved_mr,
302 heap_word_size(initial_byte_size));
303 MemRegion committed_mr(start, heap_word_size(initial_byte_size));
304 _rs->resize_covered_region(committed_mr);
305
306 // Verify that the start and end of this generation is the start of a card.
307 // If this wasn't true, a single card could span more than on generation,
308 // which would cause problems when we commit/uncommit memory, and when we
309 // clear and dirty cards.
310 guarantee(CardTable::is_card_aligned(reserved_mr.start()), "generation must be card aligned");
311 guarantee(CardTable::is_card_aligned(reserved_mr.end()), "generation must be card aligned");
312 _min_heap_delta_bytes = MinHeapDeltaBytes;
313 _capacity_at_prologue = initial_byte_size;
314 _used_at_prologue = 0;
315 HeapWord* bottom = (HeapWord*) _virtual_space.low();
316 HeapWord* end = (HeapWord*) _virtual_space.high();
317 _the_space = new ContiguousSpace();
318 _the_space->initialize(MemRegion(bottom, end), SpaceDecorator::Clear);
319 // If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
320 _shrink_factor = ShrinkHeapInSteps ? 0 : 100;
321 _capacity_at_prologue = 0;
322
323 _avg_promoted = new AdaptivePaddedNoZeroDevAverage(AdaptiveSizePolicyWeight, PromotedPadding);
324
325 // initialize performance counters
326
327 const char* gen_name = "old";
328 // Generation Counters -- generation 1, 1 subspace
329 _gen_counters = new GenerationCounters(gen_name, 1, 1,
330 min_byte_size, max_byte_size, _virtual_space.committed_size());
331
332 _gc_counters = new CollectorCounters("Serial full collection pauses", 1);
333
334 _space_counters = new HSpaceCounters(_gen_counters->name_space(), gen_name, 0,
335 _virtual_space.reserved_size(),
336 _the_space->capacity());
337 }
338
// Record capacity and usage at the start of a GC so that
// compute_new_size_inner() and update_promote_stats() can later measure how
// much the collection changed them.
void TenuredGeneration::gc_prologue() {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}
343
344 void TenuredGeneration::compute_new_size() {
345 assert_locked_or_safepoint(Heap_lock);
346
347 // Compute some numbers about the state of the heap.
348 const size_t used_after_gc = used();
349 const size_t capacity_after_gc = capacity();
350
351 compute_new_size_inner();
352
353 assert(used() == used_after_gc && used_after_gc <= capacity(),
354 "used: %zu used_after_gc: %zu"
355 " capacity: %zu", used(), used_after_gc, capacity());
356 }
357
358 void TenuredGeneration::update_promote_stats() {
359 size_t used_after_gc = used();
360 size_t promoted_in_bytes;
361 if (used_after_gc > _used_at_prologue) {
362 promoted_in_bytes = used_after_gc - _used_at_prologue;
363 } else {
364 promoted_in_bytes = 0;
365 }
366 _avg_promoted->sample(promoted_in_bytes);
367 }
368
369 void TenuredGeneration::update_counters() {
370 if (UsePerfData) {
371 _space_counters->update_all(_the_space->capacity(), _the_space->used());
372 _gen_counters->update_capacity(_virtual_space.committed_size());
373 }
374 }
375
376 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
377 size_t available = _the_space->free() + _virtual_space.uncommitted_size();
378
379 size_t avg_promoted = (size_t)_avg_promoted->padded_average();
380 size_t promotion_estimate = MIN2(avg_promoted, max_promotion_in_bytes);
381
382 bool res = (promotion_estimate <= available);
383
384 log_trace(gc)("Tenured: promo attempt is%s safe: available(%zu) %s av_promo(%zu), max_promo(%zu)",
385 res? "":" not", available, res? ">=":"<", avg_promoted, max_promotion_in_bytes);
386
387 return res;
388 }
389
390 oop TenuredGeneration::allocate_for_promotion(oop obj, size_t obj_size) {
391 assert(obj_size == obj->size() || UseCompactObjectHeaders, "bad obj_size passed in");
392
393 #ifndef PRODUCT
394 if (SerialHeap::heap()->promotion_should_fail()) {
395 return nullptr;
396 }
397 #endif // #ifndef PRODUCT
398
399 // Allocate new object.
400 HeapWord* result = allocate(obj_size);
401 if (result == nullptr) {
402 // Promotion of obj into gen failed. Try to expand and allocate.
403 result = expand_and_allocate(obj_size);
404 }
405
406 return cast_to_oop<HeapWord*>(result);
407 }
408
409 HeapWord*
410 TenuredGeneration::expand_and_allocate(size_t word_size) {
411 expand(word_size*HeapWordSize, _min_heap_delta_bytes);
412 return allocate(word_size);
413 }
414
// Resizing this generation is only legal at a safepoint or while holding
// the Heap_lock.
void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(Heap_lock);
}
418
// Apply "blk" to every object in this generation's space.
void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}
422
423 void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) {
424 // Create the BOT for the archive space.
425 HeapWord* start = archive_space.start();
426 while (start < archive_space.end()) {
427 size_t word_size = cast_to_oop(start)->size();;
428 _bts->update_for_block(start, start + word_size);
429 start += word_size;
430 }
431 }
432
// Post-GC bookkeeping for this generation.
void TenuredGeneration::gc_epilogue() {
  // update the generation and space performance counters
  update_counters();
}
437
// Delegate heap verification to the underlying space.
void TenuredGeneration::verify() {
  _the_space->verify();
}
441
// Print a one-line summary of this generation (name, total/used sizes and
// the committed boundaries), followed by the space's own details indented
// one level.
void TenuredGeneration::print_on(outputStream* st) const {
  st->print("%-10s", name());

  st->print(" total %zuK, used %zuK ",
            capacity()/K, used()/K);
  _virtual_space.print_space_boundaries_on(st);

  // Indent the contained space's output under the generation line.
  StreamIndentor si(st, 1);
  _the_space->print_on(st, "the ");
}