src/hotspot/share/gc/shared/collectedHeap.cpp

  74   stringStream st(_records[index].data.buffer(), _records[index].data.size());
  75 
  76   st.print_cr("{Heap %s GC invocations=%u (full %u):",
  77                  before ? "before" : "after",
  78                  heap->total_collections(),
  79                  heap->total_full_collections());
  80 
  81   heap->print_on(&st);
  82   st.print_cr("}");
  83 }
  84 
  85 size_t CollectedHeap::unused() const {
  86   MutexLocker ml(Heap_lock);
  87   return capacity() - used();
  88 }
  89 
  90 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  91   size_t capacity_in_words = capacity() / HeapWordSize;
  92 
  93   return VirtualSpaceSummary(
  94     _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
  95 }
  96 
  97 GCHeapSummary CollectedHeap::create_heap_summary() {
  98   VirtualSpaceSummary heap_space = create_heap_space_summary();
  99   return GCHeapSummary(heap_space, used());
 100 }
 101 
 102 MetaspaceSummary CollectedHeap::create_metaspace_summary() {
 103   const MetaspaceSizes meta_space(
 104       MetaspaceUtils::committed_bytes(),
 105       MetaspaceUtils::used_bytes(),
 106       MetaspaceUtils::reserved_bytes());
 107   const MetaspaceSizes data_space(
 108       MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
 109       MetaspaceUtils::used_bytes(Metaspace::NonClassType),
 110       MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
 111   const MetaspaceSizes class_space(
 112       MetaspaceUtils::committed_bytes(Metaspace::ClassType),
 113       MetaspaceUtils::used_bytes(Metaspace::ClassType),
 114       MetaspaceUtils::reserved_bytes(Metaspace::ClassType));
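
As an aside on the summary construction above: capacity() is reported in bytes, while VirtualSpaceSummary takes HeapWord* bounds, so the committed end is the reserved start advanced by the capacity converted to words. A minimal standalone sketch of that conversion with made-up heap geometry (not part of the patch; the word size and addresses are assumptions):

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed 64-bit layout and made-up heap geometry, for illustration only.
  const size_t    HeapWordSize   = 8;
  const uintptr_t reserved_start = 0x100000000ULL;   // hypothetical reserved base
  const size_t    reserved_bytes = 4ULL << 30;       // 4 GiB reserved
  const size_t    capacity_bytes = 1ULL << 30;       // 1 GiB currently committed

  // Mirrors create_heap_space_summary(): convert bytes to words, then advance
  // the start address by that many words to get the committed end.
  const size_t    capacity_in_words = capacity_bytes / HeapWordSize;
  const uintptr_t committed_end     = reserved_start + capacity_in_words * HeapWordSize;
  const uintptr_t reserved_end      = reserved_start + reserved_bytes;

  printf("start=0x%llx committed_end=0x%llx reserved_end=0x%llx\n",
         (unsigned long long)reserved_start,
         (unsigned long long)committed_end,
         (unsigned long long)reserved_end);
  return 0;
}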


 161 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
 162   trace_heap(GCWhen::AfterGC, gc_tracer);
 163 }
 164 
 165 // WhiteBox API support for concurrent collectors.  These are the
 166 // default implementations, for collectors which don't support this
 167 // feature.
 168 bool CollectedHeap::supports_concurrent_phase_control() const {
 169   return false;
 170 }
 171 
 172 bool CollectedHeap::request_concurrent_phase(const char* phase) {
 173   return false;
 174 }
 175 
 176 bool CollectedHeap::is_oop(oop object) const {
 177   if (!check_obj_alignment(object)) {
 178     return false;
 179   }
 180 
 181   if (!is_in(object)) {
 182     return false;
 183   }
 184 
 185   if (is_in(object->klass_or_null())) {
 186     return false;
 187   }
 188 
 189   return true;
 190 }
 191 
 192 // Memory state functions.
 193 
 194 
 195 CollectedHeap::CollectedHeap() :
 196   _is_gc_active(false),
 197   _total_collections(0),
 198   _total_full_collections(0),
 199   _gc_cause(GCCause::_no_gc),
 200   _gc_lastcause(GCCause::_no_gc)
 201 {
 202   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
 203   const size_t elements_per_word = HeapWordSize / sizeof(jint);
 204   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
 205                                              max_len / elements_per_word);
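
The constructor above caps filler arrays at the largest int[] the VM will allocate, expressed in heap words: max_len elements packed elements_per_word to a word, plus the array header. A worked example under the usual 64-bit assumptions (the header size and max_len here are illustrative stand-ins, not the real values of filler_array_hdr_size() and max_array_length(T_INT)):

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed 64-bit layout, for illustration only.
  const size_t HeapWordSize      = 8;
  const size_t jint_size         = 4;
  const size_t elements_per_word = HeapWordSize / jint_size;   // 2 ints per heap word
  const size_t max_len           = 0x7fffffff;                 // stand-in for max_array_length(T_INT)
  const size_t hdr_words         = 2;                          // stand-in for filler_array_hdr_size()

  // Mirrors the constructor (minus align_object_size): header plus payload in words.
  const size_t filler_array_max_size = hdr_words + max_len / elements_per_word;
  printf("filler array max size = %zu words (about %zu MiB)\n",
         filler_array_max_size, (filler_array_max_size * HeapWordSize) >> 20);
  return 0;
}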


 326     }
 327   } while (true);  // Until a GC is done
 328 }
 329 
 330 MemoryUsage CollectedHeap::memory_usage() {
 331   return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
 332 }
 333 
 334 
 335 #ifndef PRODUCT
 336 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
 337   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
 338     for (size_t slot = 0; slot < size; slot += 1) {
 339       assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
 340              "Found non badHeapWordValue in pre-allocation check");
 341     }
 342   }
 343 }
 344 #endif // PRODUCT
 345 
 346 void CollectedHeap::check_oop_location(void* addr) const {
 347   assert(check_obj_alignment(addr), "address is not aligned");
 348   assert(_reserved.contains(addr),  "address is not in reserved heap");
 349 }
 350 
 351 size_t CollectedHeap::max_tlab_size() const {
  352   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 353   // This restriction could be removed by enabling filling with multiple arrays.
 354   // If we compute that the reasonable way as
 355   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 356   // we'll overflow on the multiply, so we do the divide first.
 357   // We actually lose a little by dividing first,
 358   // but that just makes the TLAB  somewhat smaller than the biggest array,
 359   // which is fine, since we'll be able to fill that.
 360   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 361               sizeof(jint) *
 362               ((juint) max_jint / (size_t) HeapWordSize);
 363   return align_down(max_int_size, MinObjAlignment);
 364 }
 365 
 366 size_t CollectedHeap::filler_array_hdr_size() {
 367   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
 368 }
 369 
 370 size_t CollectedHeap::filler_array_min_size() {
 371   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 372 }
 373 
 374 #ifdef ASSERT
 375 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
 376 {
 377   assert(words >= min_fill_size(), "too small to fill");
 378   assert(is_object_aligned(words), "unaligned size");
 379   DEBUG_ONLY(Universe::heap()->check_oop_location(start);)
 380   DEBUG_ONLY(Universe::heap()->check_oop_location(start + words - MinObjAlignment);)
 381 }
 382 
 383 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
 384 {
 385   if (ZapFillerObjects && zap) {
 386     Copy::fill_to_words(start + filler_array_hdr_size(),
 387                         words - filler_array_hdr_size(), 0XDEAFBABE);
 388   }
 389 }
 390 #endif // ASSERT
 391 
 392 void
 393 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
 394 {
 395   assert(words >= filler_array_min_size(), "too small for an array");
 396   assert(words <= filler_array_max_size(), "too big for a single object");
 397 
 398   const size_t payload_size = words - filler_array_hdr_size();
 399   const size_t len = payload_size * HeapWordSize / sizeof(jint);
 400   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
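
The comment in max_tlab_size above is about integer overflow on a 32-bit VM: computing sizeof(jint) * max_jint first needs 33 bits, so the code divides by HeapWordSize before multiplying and accepts losing a few words. A standalone sketch of the two evaluation orders under assumed 32-bit arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed 32-bit VM: 4-byte heap words and 32-bit size arithmetic.
  const uint32_t max_jint     = 0x7fffffff;   // Integer.MAX_VALUE elements
  const uint32_t HeapWordSize = 4;
  const uint32_t jint_size    = 4;

  // Multiply first: 4 * 0x7fffffff needs 33 bits, so the product wraps and the
  // result comes out far too small.
  const uint32_t multiply_first = jint_size * max_jint / HeapWordSize;   // 0x3fffffff
  // Divide first: stays in range; the result is only a few words short of the
  // exact 0x7fffffff, which the comment accepts as "losing a little".
  const uint32_t divide_first   = jint_size * (max_jint / HeapWordSize); // 0x7ffffffc
  printf("multiply first = 0x%x words, divide first = 0x%x words\n",
         multiply_first, divide_first);
  return 0;
}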


 504   }
 505 
 506   LogTarget(Trace, gc, classhisto) lt;
 507   if (lt.is_enabled()) {
 508     GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
 509     ResourceMark rm;
 510     LogStream ls(lt);
 511     VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
 512     inspector.doit();
 513   }
 514 }
 515 
 516 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
 517   full_gc_dump(timer, true);
 518 }
 519 
 520 void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
 521   full_gc_dump(timer, false);
 522 }
 523 
 524 void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
 525   // It is important to do this in a way such that concurrent readers can't
 526   // temporarily think something is in the heap.  (Seen this happen in asserts.)
 527   _reserved.set_word_size(0);
 528   _reserved.set_start((HeapWord*)rs.base());
 529   _reserved.set_end((HeapWord*)rs.end());
 530 }
 531 
 532 void CollectedHeap::post_initialize() {
 533   initialize_serviceability();
 534 }
 535 
 536 #ifndef PRODUCT
 537 
 538 bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
 539   // Access to count is not atomic; the value does not have to be exact.
 540   if (PromotionFailureALot) {
 541     const size_t gc_num = total_collections();
 542     const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
 543     if (elapsed_gcs >= PromotionFailureALotInterval) {
 544       // Test for unsigned arithmetic wrap-around.
 545       if (++*count >= PromotionFailureALotCount) {
 546         *count = 0;
 547         return true;
 548       }
 549     }
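
On the ordering comment in initialize_reserved_region above: the region is made empty (word size zero) before the bounds are rewritten, so a racing reader asking "is this address in the heap?" can only get a false negative, never a transient region covering addresses that were never part of the heap. A minimal sketch of that idea (the struct below is a hypothetical stand-in, not the real MemRegion, and memory-ordering details are glossed over):

#include <cstddef>

// Hypothetical stand-in for a MemRegion-like range: [start, start + size).
struct Range {
  char*  _start;
  size_t _size;      // 0 means "empty"

  bool contains(const void* p) const {
    return p >= _start && p < _start + _size;
  }
};

void initialize_range(Range& r, char* start, char* end) {
  // Empty the range first: while _start and _size are being rewritten, a
  // concurrent contains() can only answer "no" instead of matching stray
  // addresses between the old and new bounds.
  r._size  = 0;
  r._start = start;
  r._size  = (size_t)(end - start);
}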




  74   stringStream st(_records[index].data.buffer(), _records[index].data.size());
  75 
  76   st.print_cr("{Heap %s GC invocations=%u (full %u):",
  77                  before ? "before" : "after",
  78                  heap->total_collections(),
  79                  heap->total_full_collections());
  80 
  81   heap->print_on(&st);
  82   st.print_cr("}");
  83 }
  84 
  85 size_t CollectedHeap::unused() const {
  86   MutexLocker ml(Heap_lock);
  87   return capacity() - used();
  88 }
  89 
  90 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  91   size_t capacity_in_words = capacity() / HeapWordSize;
  92 
  93   return VirtualSpaceSummary(
  94     reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
  95 }
  96 
  97 GCHeapSummary CollectedHeap::create_heap_summary() {
  98   VirtualSpaceSummary heap_space = create_heap_space_summary();
  99   return GCHeapSummary(heap_space, used());
 100 }
 101 
 102 MetaspaceSummary CollectedHeap::create_metaspace_summary() {
 103   const MetaspaceSizes meta_space(
 104       MetaspaceUtils::committed_bytes(),
 105       MetaspaceUtils::used_bytes(),
 106       MetaspaceUtils::reserved_bytes());
 107   const MetaspaceSizes data_space(
 108       MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
 109       MetaspaceUtils::used_bytes(Metaspace::NonClassType),
 110       MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
 111   const MetaspaceSizes class_space(
 112       MetaspaceUtils::committed_bytes(Metaspace::ClassType),
 113       MetaspaceUtils::used_bytes(Metaspace::ClassType),
 114       MetaspaceUtils::reserved_bytes(Metaspace::ClassType));


 161 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
 162   trace_heap(GCWhen::AfterGC, gc_tracer);
 163 }
 164 
 165 // WhiteBox API support for concurrent collectors.  These are the
 166 // default implementations, for collectors which don't support this
 167 // feature.
 168 bool CollectedHeap::supports_concurrent_phase_control() const {
 169   return false;
 170 }
 171 
 172 bool CollectedHeap::request_concurrent_phase(const char* phase) {
 173   return false;
 174 }
 175 
 176 bool CollectedHeap::is_oop(oop object) const {
 177   if (!check_obj_alignment(object)) {
 178     return false;
 179   }
 180 
 181   if (!is_in_reserved(object)) {
 182     return false;
 183   }
 184 
 185   if (is_in_reserved(object->klass_or_null())) {
 186     return false;
 187   }
 188 
 189   return true;
 190 }
 191 
 192 // Memory state functions.
 193 
 194 
 195 CollectedHeap::CollectedHeap() :
 196   _is_gc_active(false),
 197   _total_collections(0),
 198   _total_full_collections(0),
 199   _gc_cause(GCCause::_no_gc),
 200   _gc_lastcause(GCCause::_no_gc)
 201 {
 202   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
 203   const size_t elements_per_word = HeapWordSize / sizeof(jint);
 204   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
 205                                              max_len / elements_per_word);


 326     }
 327   } while (true);  // Until a GC is done
 328 }
 329 
 330 MemoryUsage CollectedHeap::memory_usage() {
 331   return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
 332 }
 333 
 334 
 335 #ifndef PRODUCT
 336 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
 337   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
 338     for (size_t slot = 0; slot < size; slot += 1) {
 339       assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
 340              "Found non badHeapWordValue in pre-allocation check");
 341     }
 342   }
 343 }
 344 #endif // PRODUCT
 345 





 346 size_t CollectedHeap::max_tlab_size() const {
  347   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
 348   // This restriction could be removed by enabling filling with multiple arrays.
 349   // If we compute that the reasonable way as
 350   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
 351   // we'll overflow on the multiply, so we do the divide first.
 352   // We actually lose a little by dividing first,
 353   // but that just makes the TLAB  somewhat smaller than the biggest array,
 354   // which is fine, since we'll be able to fill that.
 355   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
 356               sizeof(jint) *
 357               ((juint) max_jint / (size_t) HeapWordSize);
 358   return align_down(max_int_size, MinObjAlignment);
 359 }
 360 
 361 size_t CollectedHeap::filler_array_hdr_size() {
 362   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
 363 }
 364 
 365 size_t CollectedHeap::filler_array_min_size() {
 366   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 367 }
 368 
 369 #ifdef ASSERT
 370 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
 371 {
 372   assert(words >= min_fill_size(), "too small to fill");
 373   assert(is_object_aligned(words), "unaligned size");
 374   assert(Universe::heap()->is_in_reserved(start), "not in heap");
 375   assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
 376 }
 377 
 378 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
 379 {
 380   if (ZapFillerObjects && zap) {
 381     Copy::fill_to_words(start + filler_array_hdr_size(),
 382                         words - filler_array_hdr_size(), 0XDEAFBABE);
 383   }
 384 }
 385 #endif // ASSERT
 386 
 387 void
 388 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
 389 {
 390   assert(words >= filler_array_min_size(), "too small for an array");
 391   assert(words <= filler_array_max_size(), "too big for a single object");
 392 
 393   const size_t payload_size = words - filler_array_hdr_size();
 394   const size_t len = payload_size * HeapWordSize / sizeof(jint);
 395   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);


 499   }
 500 
 501   LogTarget(Trace, gc, classhisto) lt;
 502   if (lt.is_enabled()) {
 503     GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
 504     ResourceMark rm;
 505     LogStream ls(lt);
 506     VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
 507     inspector.doit();
 508   }
 509 }
 510 
 511 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
 512   full_gc_dump(timer, true);
 513 }
 514 
 515 void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
 516   full_gc_dump(timer, false);
 517 }
 518 
 519 void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
 520   // It is important to do this in a way such that concurrent readers can't
 521   // temporarily think something is in the heap.  (Seen this happen in asserts.)
 522   _reserved.set_word_size(0);
 523   _reserved.set_start(start);
 524   _reserved.set_end(end);
 525 }
 526 
 527 void CollectedHeap::post_initialize() {
 528   initialize_serviceability();
 529 }
 530 
 531 #ifndef PRODUCT
 532 
 533 bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
 534   // Access to count is not atomic; the value does not have to be exact.
 535   if (PromotionFailureALot) {
 536     const size_t gc_num = total_collections();
 537     const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
 538     if (elapsed_gcs >= PromotionFailureALotInterval) {
 539       // Test for unsigned arithmetic wrap-around.
 540       if (++*count >= PromotionFailureALotCount) {
 541         *count = 0;
 542         return true;
 543       }
 544     }
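
promotion_should_fail above implements the PromotionFailureALot stress option: once at least PromotionFailureALotInterval GCs have passed since the counters were last reset, every PromotionFailureALotCount-th call reports an injected failure. A self-contained sketch with made-up flag values (the reset of the GC-number baseline happens elsewhere and is not shown in this hunk):

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for the real -XX flags and the heap's GC counters.
const bool   PromotionFailureALot         = true;
const size_t PromotionFailureALotInterval = 5;     // GCs to wait before injecting failures
const size_t PromotionFailureALotCount    = 1000;  // calls between injected failures

static size_t promotion_failure_alot_gc_number = 0;  // baseline GC count (reset not shown)

bool promotion_should_fail(volatile size_t* count, size_t total_collections) {
  if (PromotionFailureALot) {
    const size_t elapsed_gcs = total_collections - promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // The increment is deliberately not atomic: a lost update only delays the
      // next injected failure, which is fine for a stress-test knob.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

int main() {
  volatile size_t count    = 0;
  size_t          failures = 0;
  for (size_t call = 0; call < 10000; call++) {
    if (promotion_should_fail(&count, /* total_collections */ 10)) {
      failures++;
    }
  }
  printf("injected %zu simulated promotion failures\n", failures);  // expect 10
  return 0;
}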

