/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
                   size_t max_size, const char* perf_data_name, int level):
  _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, initial_size, GenAlignment, perf_data_name, level);
}

void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, initial_size, alignment);
  initialize_work(perf_data_name, level);

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs,
                                        size_t initial_size,
                                        size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}
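
// Set up the old generation's supporting data structures: the object start
// array is initialized for the whole reserved range, while the card table,
// the object space and the start array's covered region track the currently
// committed range.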
void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  MemRegion const reserved_mr = reserved();
  assert(reserved_mr.byte_size() == max_gen_size(), "invariant");

  // Object start stuff: for all reserved memory
  start_array()->initialize(reserved_mr);

  // Card table stuff: for all committed memory
  MemRegion committed_mr((HeapWord*)virtual_space()->low(),
                         (HeapWord*)virtual_space()->high());

  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(committed_mr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(committed_mr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  // Check the heap layout documented at `class ParallelScavengeHeap`.
  assert(reserved_mr.end() != heap->reserved_region().end(), "invariant");
  guarantee(ct->is_card_aligned(reserved_mr.end()), "generation must be card aligned");

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());
  object_space()->initialize(committed_mr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle,
                             MutableSpace::SetupPages,
                             &ParallelScavengeHeap::heap()->workers());

  // Update the start_array
  start_array()->set_covered_region(committed_mr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, min_gen_size(),
                                           max_gen_size(), virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

size_t PSOldGen::num_iterable_blocks() const {
  return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize;
}

void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
  size_t block_word_size = IterateBlockSize / HeapWordSize;
  assert((block_word_size % (ObjectStartArray::card_size())) == 0,
         "Block size not a multiple of start_array block");

  MutableSpace *space = object_space();

  HeapWord* begin = space->bottom() + block_index * block_word_size;
  HeapWord* end = MIN2(space->top(), begin + block_word_size);

  if (!start_array()->object_starts_in_range(begin, end)) {
    return;
  }

  // Get object starting at or reaching into this block.
  HeapWord* start = start_array()->object_start(begin);
  if (start < begin) {
    start += cast_to_oop(start)->size();
  }
  assert(start >= begin,
         "Object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT,
         p2i(start), p2i(begin));
  // Iterate all objects until the end.
  for (HeapWord* p = start; p < end; p += cast_to_oop(p)->size()) {
    cl->do_object(cast_to_oop(p));
  }
}

bool PSOldGen::expand_for_allocate(size_t word_size) {
  assert(word_size > 0, "allocating zero words?");
  bool result = true;
  {
    MutexLocker x(PSOldGenExpand_lock);
    // Avoid "expand storms" by rechecking available space after obtaining
    // the lock, because another thread may have already made sufficient
    // space available.  If insufficient space available, that will remain
    // true until we expand, since we have the lock.  Other threads may take
    // the space we need before we can allocate it, regardless of whether we
    // expand.  That's okay, we'll just try expanding again.
    if (object_space()->needs_expand(word_size)) {
      result = expand(word_size*HeapWordSize);
    }
  }
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return result;
}
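
// Best-effort growth of the committed space: try the larger of the aligned
// request and the aligned MinHeapDeltaBytes first, then the aligned request
// itself, and finally fall back to committing whatever reserved space remains.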
bool PSOldGen::expand(size_t bytes) {
  assert_lock_strong(PSOldGenExpand_lock);
  assert_locked_or_safepoint(Heap_lock);
  assert(bytes > 0, "precondition");
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  A call to expand
    // implies a best effort to expand by "bytes" but not a guarantee.  Align
    // down to give a best effort.  This is likely the most that the generation
    // can expand since it has some capacity to start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
  return success;
}
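
// Commit "bytes" of the underlying virtual space and, on success, resize the
// dependent data structures (start array, card table, object space) via
// post_resize() before updating the performance counters.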
bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(PSOldGenExpand_lock);
  assert_locked_or_safepoint(Heap_lock);
  assert(bytes > 0, "precondition");
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize();
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(PSOldGenExpand_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = false;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(PSOldGenExpand_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(bytes);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
}
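
// Record the object starts of a loaded archive region in the start array by
// walking the region object by object.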
void PSOldGen::complete_loaded_archive_space(MemRegion archive_space) {
  HeapWord* cur = archive_space.start();
  while (cur < archive_space.end()) {
    _start_array.allocate_block(cur);
    size_t word_size = cast_to_oop(cur)->size();
    cur += word_size;
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = max_gen_size();
  }
  // Adjust according to our min and max
  new_size = clamp(new_size, min_gen_size(), max_gen_size());

  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
    " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      desired_free_space, used_in_bytes(), new_size, current_size,
      max_gen_size(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    MutexLocker x(PSOldGenExpand_lock);
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    MutexLocker x(PSOldGenExpand_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion.  If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  WorkerThreads* workers = Thread::current()->is_VM_thread() ?
                           &ParallelScavengeHeap::heap()->workers() : nullptr;

  // The update of the space's end is done by this call.  As that
  // makes the new space available for concurrent allocation, this
  // must be the last step when expanding.
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle,
                             MutableSpace::SetupPages,
                             workers);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

void PSOldGen::print() const { print_on(tty);}
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

void PSOldGen::verify() {
  object_space()->verify();
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(ObjectStartArray* start_array) :
    _start_array(start_array) { }

  virtual void do_object(oop obj) {
    // With compact headers, the objects can be one-word sized.
    size_t int_off = UseCompactObjectHeaders ? MIN2((size_t)1, obj->size() - 1) : 1;
    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + int_off;
    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj), "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(&_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif