< prev index next >

src/hotspot/share/gc/shared/collectedHeap.cpp

Print this page

206 }
207 
208 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
209   trace_heap(GCWhen::AfterGC, gc_tracer);
210 }
211 
212 // Default implementation, for collectors that don't support the feature.
213 bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
214   return false;
215 }
216 
217 bool CollectedHeap::is_oop(oop object) const {
218   if (!is_object_aligned(object)) {
219     return false;
220   }
221 
222   if (!is_in(object)) {
223     return false;
224   }
225 
226   if (is_in(object->klass_or_null())) {
227     return false;
228   }
229 
230   return true;
231 }
232 
233 // Memory state functions.
234 
235 
236 CollectedHeap::CollectedHeap() :
237   _capacity_at_last_gc(0),
238   _used_at_last_gc(0),
239   _is_gc_active(false),
240   _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
241   _total_collections(0),
242   _total_full_collections(0),
243   _gc_cause(GCCause::_no_gc),
244   _gc_lastcause(GCCause::_no_gc)
245 {
246   // If the minimum object size is greater than MinObjAlignment, we can
247   // end up with a shard at the end of the buffer that's smaller than
248   // the smallest object.  We can't allow that because the buffer must
249   // look like it's full of objects when we retire it, so we make
250   // sure we have enough space for a filler int array object.
251   size_t min_size = min_dummy_object_size();
252   _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
253 
254   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
255   const size_t elements_per_word = HeapWordSize / sizeof(jint);
256   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
257                                              max_len / elements_per_word);


258 
259   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
260   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
261 
262   if (UsePerfData) {
263     EXCEPTION_MARK;
264 
265     // create the gc cause jvmstat counters
266     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
267                              80, GCCause::to_string(_gc_cause), CHECK);
268 
269     _perf_gc_lastcause =
270                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
271                              80, GCCause::to_string(_gc_lastcause), CHECK);
272   }
273 
274   // Create the ring log
275   if (LogEvents) {
276     _gc_heap_log = new GCHeapLog();
277   } else {

399 #ifndef PRODUCT
400 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
401   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
402     // please note mismatch between size (in 32/64 bit words), and ju_addr that always point to a 32 bit word
403     for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
404       assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
405     }
406   }
407 }
408 #endif // PRODUCT
409 
410 size_t CollectedHeap::max_tlab_size() const {
411   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
412   // This restriction could be removed by enabling filling with multiple arrays.
413   // If we compute that the reasonable way as
414   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
415   // we'll overflow on the multiply, so we do the divide first.
416   // We actually lose a little by dividing first,
417   // but that just makes the TLAB  somewhat smaller than the biggest array,
418   // which is fine, since we'll be able to fill that.
419   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +


420               sizeof(jint) *
421               ((juint) max_jint / (size_t) HeapWordSize);
422   return align_down(max_int_size, MinObjAlignment);
423 }
424 
425 size_t CollectedHeap::filler_array_hdr_size() {
426   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
427 }
428 
429 size_t CollectedHeap::filler_array_min_size() {
430   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment

431 }
432 
433 void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
434   Copy::fill_to_words(start + filler_array_hdr_size(),
435                       words - filler_array_hdr_size(), value);

436 }
437 
#ifdef ASSERT
// Validate the arguments of a fill request: large enough for a filler
// object and a properly aligned object size.
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

// Debug aid: make filler payloads recognizable in a memory dump.
void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    zap_filler_array_with(start, words, 0xDEAFBABE);
  }
}
#endif // ASSERT
452 
453 void
454 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
455 {
456   assert(words >= filler_array_min_size(), "too small for an array");
457   assert(words <= filler_array_max_size(), "too big for a single object");
458 
459   const size_t payload_size = words - filler_array_hdr_size();
460   const size_t len = payload_size * HeapWordSize / sizeof(jint);

461   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
462 
463   ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
464   allocator.initialize(start);
465   if (DumpSharedSpaces) {
466     // This array is written into the CDS archive. Make sure it
467     // has deterministic contents.
468     zap_filler_array_with(start, words, 0);
469   } else {
470     DEBUG_ONLY(zap_filler_array(start, words, zap);)
471   }
472 }
473 
474 void
475 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
476 {
477   assert(words <= filler_array_max_size(), "too big for a single object");
478 
479   if (words >= filler_array_min_size()) {
480     fill_with_array(start, words, zap);

206 }
207 
208 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
209   trace_heap(GCWhen::AfterGC, gc_tracer);
210 }
211 
212 // Default implementation, for collectors that don't support the feature.
213 bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
214   return false;
215 }
216 
217 bool CollectedHeap::is_oop(oop object) const {
218   if (!is_object_aligned(object)) {
219     return false;
220   }
221 
222   if (!is_in(object)) {
223     return false;
224   }
225 




226   return true;
227 }
228 
229 // Memory state functions.
230 
231 
232 CollectedHeap::CollectedHeap() :
233   _capacity_at_last_gc(0),
234   _used_at_last_gc(0),
235   _is_gc_active(false),
236   _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
237   _total_collections(0),
238   _total_full_collections(0),
239   _gc_cause(GCCause::_no_gc),
240   _gc_lastcause(GCCause::_no_gc)
241 {
242   // If the minimum object size is greater than MinObjAlignment, we can
243   // end up with a shard at the end of the buffer that's smaller than
244   // the smallest object.  We can't allow that because the buffer must
245   // look like it's full of objects when we retire it, so we make
246   // sure we have enough space for a filler int array object.
247   size_t min_size = min_dummy_object_size();
248   _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
249 
250   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
251   const size_t elements_per_word = HeapWordSize / sizeof(jint);
252   int header_size_in_bytes = arrayOopDesc::base_offset_in_bytes(T_INT);
253   assert(header_size_in_bytes % sizeof(jint) == 0, "must be aligned to int");
254   int header_size_in_ints = header_size_in_bytes / sizeof(jint);
255   _filler_array_max_size = align_object_size((header_size_in_ints + max_len) / elements_per_word);
256 
257   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
258   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
259 
260   if (UsePerfData) {
261     EXCEPTION_MARK;
262 
263     // create the gc cause jvmstat counters
264     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
265                              80, GCCause::to_string(_gc_cause), CHECK);
266 
267     _perf_gc_lastcause =
268                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
269                              80, GCCause::to_string(_gc_lastcause), CHECK);
270   }
271 
272   // Create the ring log
273   if (LogEvents) {
274     _gc_heap_log = new GCHeapLog();
275   } else {

397 #ifndef PRODUCT
398 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
399   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
400     // please note mismatch between size (in 32/64 bit words), and ju_addr that always point to a 32 bit word
401     for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
402       assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
403     }
404   }
405 }
406 #endif // PRODUCT
407 
408 size_t CollectedHeap::max_tlab_size() const {
409   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
410   // This restriction could be removed by enabling filling with multiple arrays.
411   // If we compute that the reasonable way as
412   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
413   // we'll overflow on the multiply, so we do the divide first.
414   // We actually lose a little by dividing first,
415   // but that just makes the TLAB  somewhat smaller than the biggest array,
416   // which is fine, since we'll be able to fill that.
417   int header_size_in_bytes = typeArrayOopDesc::base_offset_in_bytes(T_INT);
418   assert(header_size_in_bytes % sizeof(jint) == 0, "header size must align to int");
419   size_t max_int_size = header_size_in_bytes / HeapWordSize +
420               sizeof(jint) *
421               ((juint) max_jint / (size_t) HeapWordSize);
422   return align_down(max_int_size, MinObjAlignment);
423 }
424 




425 size_t CollectedHeap::filler_array_min_size() {
426   int aligned_header_size_words = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
427   return align_object_size(aligned_header_size_words); // align to MinObjAlignment
428 }
429 
430 void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
431   int payload_start = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
432   Copy::fill_to_words(start + payload_start,
433                       words - payload_start, value);
434 }
435 
#ifdef ASSERT
// Validate the arguments of a fill request: large enough for a filler
// object and a properly aligned object size.
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

// Debug aid: make filler payloads recognizable in a memory dump.
void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    zap_filler_array_with(start, words, 0xDEAFBABE);
  }
}
#endif // ASSERT
450 
451 void
452 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
453 {
454   assert(words >= filler_array_min_size(), "too small for an array");
455   assert(words <= filler_array_max_size(), "too big for a single object");
456 
457   const size_t payload_size_bytes = words * HeapWordSize - arrayOopDesc::base_offset_in_bytes(T_INT);
458   assert(payload_size_bytes % sizeof(jint) == 0, "must be int aligned");
459   const size_t len = payload_size_bytes / sizeof(jint);
460   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
461 
462   ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
463   allocator.initialize(start);
464   if (DumpSharedSpaces) {
465     // This array is written into the CDS archive. Make sure it
466     // has deterministic contents.
467     zap_filler_array_with(start, words, 0);
468   } else {
469     DEBUG_ONLY(zap_filler_array(start, words, zap);)
470   }
471 }
472 
473 void
474 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
475 {
476   assert(words <= filler_array_max_size(), "too big for a single object");
477 
478   if (words >= filler_array_min_size()) {
479     fill_with_array(start, words, zap);
< prev index next >