/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class ClassLoaderData;

size_t CollectedHeap::_lab_alignment_reserve = SIZE_MAX;
Klass* CollectedHeap::_filler_object_klass = nullptr;
size_t CollectedHeap::_filler_array_max_size = 0;
size_t CollectedHeap::_stack_chunk_max_size = 0;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;
};

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = nullptr; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

ParallelObjectIterator::ParallelObjectIterator(uint thread_num) :
  _impl(Universe::heap()->parallel_object_iterator(thread_num))
{}

ParallelObjectIterator::~ParallelObjectIterator() {
  delete _impl;
}

void ParallelObjectIterator::object_iterate(ObjectClosure* cl, uint worker_id) {
  _impl->object_iterate(cl, worker_id);
}

size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);
  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(),
                          MetaspaceUtils::get_combined_statistics(),
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

bool CollectedHeap::contains_null(const oop* p) const {
  return *p == nullptr;
}

void CollectedHeap::print_heap_before_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap before GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != nullptr) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap after GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != nullptr) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void
CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  if (!Metaspace::contains(object->klass_raw())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_stw_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  // If the minimum object size is greater than MinObjAlignment, we can
  // end up with a shard at the end of the buffer that's smaller than
  // the smallest object. We can't allow that because the buffer must
  // look like it's full of objects when we retire it, so we make
  // sure we have enough space for a filler int array object.
  size_t min_size = min_dummy_object_size();
  _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;

  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = nullptr;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  Thread* thread = Thread::current();
  assert(thread->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_codecache_GC_threshold:
    case GCCause::_codecache_GC_aggressive:
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm(thread);
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

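// Slow path for metadata allocation: loop until the allocation succeeds,
// expanding the metaspace while the GCLocker blocks collection and otherwise
// requesting a GC via VM_CollectForMetadataAllocation. A warning is logged
// every QueuedAllocationWarningCount retries (when that flag is non-zero).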
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != nullptr) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != nullptr) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return nullptr;
      }
    }

    { // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

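// Snapshot of heap memory usage: initial size, currently used, committed
// (capacity) and maximum capacity.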
MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
                        sizeof(jint) *
                        ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
  Copy::fill_to_words(start + filler_array_hdr_size(),
                      words - filler_array_hdr_size(), value);
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    zap_filler_array_with(start, words, 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  if (DumpSharedSpaces) {
    // This array is written into the CDS archive. Make sure it
    // has deterministic contents.
    zap_filler_array_with(start, words, 0);
  } else {
    DEBUG_ONLY(zap_filler_array(start, words, zap);)
  }
}

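// Fill the range with a single filler: an int-array filler when the range is
// at least filler_array_min_size() words, otherwise (for a non-empty range)
// a minimal filler object of exactly min_fill_size() words.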
void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(CollectedHeap::filler_object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current()); // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current()); // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return nullptr;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
  _last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != nullptr, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  StringDedup::initialize();
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif // #ifndef PRODUCT

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void CollectedHeap::update_capacity_and_used_at_gc() {
  _capacity_at_last_gc = capacity();
  _used_at_last_gc = used();
}