/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/perfData.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class ClassLoaderData;

size_t CollectedHeap::_filler_array_max_size = 0;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;
};

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}
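// A minimal sketch of how the ring log above gets filled, assuming a
// collector that brackets its pause with the print_heap_before_gc() /
// print_heap_after_gc() hooks defined further down (the collector name and
// call site are illustrative only, not taken from this file):
//
//   void SomeCollector::do_collection() {   // hypothetical collector
//     print_heap_before_gc();   // appends a "{Heap before GC ...}" record
//     // ... perform the collection ...
//     print_heap_after_gc();    // appends a "{Heap after GC ...}" record
//   }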
ParallelObjectIterator::ParallelObjectIterator(uint thread_num) :
  _impl(Universe::heap()->parallel_object_iterator(thread_num))
{}

ParallelObjectIterator::~ParallelObjectIterator() {
  delete _impl;
}

void ParallelObjectIterator::object_iterate(ObjectClosure* cl, uint worker_id) {
  _impl->object_iterate(cl, worker_id);
}

size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);
  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(),
                          MetaspaceUtils::get_combined_statistics(),
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap before GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap after GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != NULL) {
    bs->print_on(st);
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}
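// A minimal sketch of how the tracing hooks above are typically driven from
// a collection pause, assuming a collector-owned GCTracer (the names and the
// call site are illustrative):
//
//   GCTracer* tracer = ...;        // e.g. a tracer created for this collection
//   trace_heap_before_gc(tracer);  // BeforeGC heap + metaspace summaries
//   // ... do the collection ...
//   trace_heap_after_gc(tracer);   // AfterGC heap + metaspace summaries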
// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                                                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                                              80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  Thread* thread = Thread::current();
  assert(thread->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm(thread);
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
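// A minimal sketch of the calling convention for the allocator below,
// assuming a fast path that first tries a plain metaspace allocation and
// only falls back here on failure (this call site is illustrative, not the
// real one):
//
//   MetaWord* p = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
//   if (p == NULL) {
//     p = Universe::heap()->satisfy_failed_metadata_allocation(loader_data,
//                                                              word_size,
//                                                              mdtype);
//   }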
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}
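// Worked example for the divide-before-multiply above, with HeapWordSize == 8
// and sizeof(jint) == 4 (illustrative numbers):
//
//   exact payload in words:  sizeof(jint) * max_jint / HeapWordSize
//                            = 4 * 0x7fffffff / 8 = 1,073,741,823 words,
//                            but the multiply overflows a 32-bit size_t;
//   divide first:            4 * (0x7fffffff / 8) = 4 * 268,435,455
//                            = 1,073,741,820 words,
//   i.e. 3 words short of the exact bound, which is harmless because a TLAB
//   of that size is still fillable by a single int array.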
size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(vmClasses::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}
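// Worked example for the chunking loop above, assuming (illustratively)
// max == filler_array_max_size() and min == min_fill_size() == 2 words:
//
//   words == 2 * max:  the loop carves out one max-sized filler array and
//                      the remaining max words become the final object;
//   words == max + 1:  a full max-sized array would strand a single word,
//                      less than min, so the loop takes cur == max - min
//                      instead, leaving min + 1 == 3 words for the final
//                      object, which is still at least min_fill_size().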
void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}

size_t CollectedHeap::tlab_alloc_reserve() const {
  size_t min_size = min_dummy_object_size();
  return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
  _last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  StringDedup::initialize();
  initialize_serviceability();
}
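// A minimal sketch of the switches that drive full_gc_dump() above; the two
// -XX flags are the ones tested in the code, while the -Xlog selector is our
// assumption about how the (gc, classhisto) trace target is usually enabled:
//
//   -XX:+HeapDumpBeforeFullGC    HeapDumper::dump_heap() before each full GC
//   -XX:+HeapDumpAfterFullGC     HeapDumper::dump_heap() after each full GC
//   -Xlog:gc+classhisto*=trace   class histogram before and after full GC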
#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

bool CollectedHeap::is_archived_object(oop object) const {
  return false;
}

uint32_t CollectedHeap::hash_oop(oop obj) const {
  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
}

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void CollectedHeap::update_capacity_and_used_at_gc() {
  _capacity_at_last_gc = capacity();
  _used_at_last_gc = used();
}
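// Worked example for promotion_should_fail() above, assuming a non-product
// build with -XX:+PromotionFailureALot and (illustrative values)
// PromotionFailureALotInterval == 5, PromotionFailureALotCount == 1000:
//
//   - for the first 4 GCs after the last reset, elapsed_gcs < 5, so the
//     counter is never bumped and no promotion is failed artificially;
//   - from the 5th GC on, every 1000th increment of the shared counter
//     returns true once and resets the counter to 0, forcing one artificial
//     promotion failure.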