src/hotspot/share/gc/shared/collectedHeap.cpp (old version)

211 }
212 
213 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
214   trace_heap(GCWhen::AfterGC, gc_tracer);
215 }
216 
217 // Default implementation, for collectors that don't support the feature.
218 bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
219   return false;
220 }
221 
222 bool CollectedHeap::is_oop(oop object) const {
223   if (!is_object_aligned(object)) {
224     return false;
225   }
226 
227   if (!is_in(object)) {
228     return false;
229   }
230 
231   if (!Metaspace::contains(object->klass_raw())) {
232     return false;
233   }
234 
235   return true;
236 }
237 
238 // Memory state functions.
239 
240 
241 CollectedHeap::CollectedHeap() :
242   _capacity_at_last_gc(0),
243   _used_at_last_gc(0),
244   _is_gc_active(false),
245   _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
246   _total_collections(0),
247   _total_full_collections(0),
248   _gc_cause(GCCause::_no_gc),
249   _gc_lastcause(GCCause::_no_gc)
250 {
251   // If the minimum object size is greater than MinObjAlignment, we can
252   // end up with a shard at the end of the buffer that's smaller than
253   // the smallest object.  We can't allow that because the buffer must
254   // look like it's full of objects when we retire it, so we make
255   // sure we have enough space for a filler int array object.
256   size_t min_size = min_dummy_object_size();
257   _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
258 
259   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
260   const size_t elements_per_word = HeapWordSize / sizeof(jint);
261   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
262                                              max_len / elements_per_word);
263 
264   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
265   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
266 
267   if (UsePerfData) {
268     EXCEPTION_MARK;
269 
270     // create the gc cause jvmstat counters
271     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
272                              80, GCCause::to_string(_gc_cause), CHECK);
273 
274     _perf_gc_lastcause =
275                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
276                              80, GCCause::to_string(_gc_lastcause), CHECK);
277   }
278 
279   // Create the ring log
280   if (LogEvents) {
281     _gc_heap_log = new GCHeapLog();
282   } else {

392 }
393 
394 void CollectedHeap::set_gc_cause(GCCause::Cause v) {
395   if (UsePerfData) {
396     _gc_lastcause = _gc_cause;
397     _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
398     _perf_gc_cause->set_value(GCCause::to_string(v));
399   }
400   _gc_cause = v;
401 }
402 
403 size_t CollectedHeap::max_tlab_size() const {
404   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
405   // This restriction could be removed by enabling filling with multiple arrays.
406   // If we compute that the reasonable way, as
407   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
408   // we'll overflow on the multiply, so we do the divide first.
409   // We actually lose a little by dividing first,
410   // but that just makes the TLAB somewhat smaller than the biggest array,
411   // which is fine, since we'll be able to fill that.
412   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
413               sizeof(jint) *
414               ((juint) max_jint / (size_t) HeapWordSize);
415   return align_down(max_int_size, MinObjAlignment);
416 }
417 
418 size_t CollectedHeap::filler_array_hdr_size() {
419   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
420 }
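
For context, align_object_offset() rounds a word offset up to a multiple of
HeapWordsPerLong so that a jlong-aligned payload can follow the header. A
standalone sketch of that rounding, with assumed word sizes (the constants
below are illustrative, not taken from this file):

    #include <cassert>
    #include <cstddef>

    // Round 'offset' (in words) up to a multiple of 'words_per_long',
    // assuming 'words_per_long' is a power of two.
    static size_t align_offset_up(size_t offset, size_t words_per_long) {
      return (offset + words_per_long - 1) & ~(words_per_long - 1);
    }

    int main() {
      // Assumed 32-bit VM: 4-byte words, 2 words per jlong, 3-word header.
      assert(align_offset_up(3, 2) == 4);   // header padded to 4 words
      // Assumed 64-bit VM: 8-byte words, 1 word per jlong, nothing to pad.
      assert(align_offset_up(2, 1) == 2);
      return 0;
    }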
421 
422 size_t CollectedHeap::filler_array_min_size() {
423   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
424 }
425 
426 void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
427   Copy::fill_to_words(start + filler_array_hdr_size(),
428                       words - filler_array_hdr_size(), value);
429 }
430 
431 #ifdef ASSERT
432 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
433 {
434   assert(words >= min_fill_size(), "too small to fill");
435   assert(is_object_aligned(words), "unaligned size");
436 }
437 
438 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
439 {
440   if (ZapFillerObjects && zap) {
441     zap_filler_array_with(start, words, 0xDEAFBABE);
442   }
443 }
444 #endif // ASSERT
445 
446 void
447 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
448 {
449   assert(words >= filler_array_min_size(), "too small for an array");
450   assert(words <= filler_array_max_size(), "too big for a single object");
451 
452   const size_t payload_size = words - filler_array_hdr_size();
453   const size_t len = payload_size * HeapWordSize / sizeof(jint);
454   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
455 
456   ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
457   allocator.initialize(start);
458   if (DumpSharedSpaces) {
459     // This array is written into the CDS archive. Make sure it
460     // has deterministic contents.
461     zap_filler_array_with(start, words, 0);
462   } else {
463     DEBUG_ONLY(zap_filler_array(start, words, zap);)
464   }
465 }
466 
467 void
468 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
469 {
470   assert(words <= filler_array_max_size(), "too big for a single object");
471 
472   if (words >= filler_array_min_size()) {
473     fill_with_array(start, words, zap);

src/hotspot/share/gc/shared/collectedHeap.cpp (new version)

211 }
212 
213 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
214   trace_heap(GCWhen::AfterGC, gc_tracer);
215 }
216 
217 // Default implementation, for collectors that don't support the feature.
218 bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
219   return false;
220 }
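
The hook above is a query with a conservative default; collectors that
implement concurrent GC breakpoints override it to return true. A minimal
self-contained sketch of the pattern (the class names are hypothetical
stand-ins, not HotSpot types):

    #include <cassert>

    // Stand-in for the CollectedHeap hook; illustrative only.
    struct Heap {
      virtual bool supports_concurrent_gc_breakpoints() const { return false; }
      virtual ~Heap() = default;
    };

    // A collector that opts in simply overrides the default.
    struct BreakpointCapableHeap : Heap {
      bool supports_concurrent_gc_breakpoints() const override { return true; }
    };

    int main() {
      BreakpointCapableHeap h;
      assert(h.supports_concurrent_gc_breakpoints());
      return 0;
    }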
221 
222 bool CollectedHeap::is_oop(oop object) const {
223   if (!is_object_aligned(object)) {
224     return false;
225   }
226 
227   if (!is_in(object)) {
228     return false;
229   }
230 
231   if (!UseCompactObjectHeaders && !Metaspace::contains(object->klass_raw())) {
232     return false;
233   }
234 
235   return true;
236 }
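
The new guard skips the Metaspace check when UseCompactObjectHeaders is set,
presumably because a compact header encodes the klass in the mark word rather
than storing a raw Metaspace pointer, so klass_raw() is not meaningful to
validate there. A standalone sketch of the same three-stage filter, with
stand-in predicates (every name below is hypothetical, not a HotSpot API):

    #include <cstdint>

    static bool aligned_ok(uintptr_t p) { return (p & 0x7) == 0; } // assumed 8-byte alignment
    static bool in_heap(uintptr_t p)    { return p >= 0x1000 && p < 0x100000; } // toy bounds
    static bool klass_ok(uintptr_t)     { return true; }           // metaspace-check stand-in

    static bool looks_like_oop(uintptr_t p, bool compact_headers) {
      if (!aligned_ok(p)) return false;
      if (!in_heap(p))    return false;
      // With compact headers the klass field is not a raw pointer: skip the check.
      if (!compact_headers && !klass_ok(p)) return false;
      return true;
    }

    int main() {
      // 0x1008 is aligned and inside the toy bounds above.
      return looks_like_oop(0x1008, /*compact_headers=*/true) ? 0 : 1;
    }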
237 
238 // Memory state functions.
239 
240 
241 CollectedHeap::CollectedHeap() :
242   _capacity_at_last_gc(0),
243   _used_at_last_gc(0),
244   _is_gc_active(false),
245   _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
246   _total_collections(0),
247   _total_full_collections(0),
248   _gc_cause(GCCause::_no_gc),
249   _gc_lastcause(GCCause::_no_gc)
250 {
251   // If the minimum object size is greater than MinObjAlignment, we can
252   // end up with a shard at the end of the buffer that's smaller than
253   // the smallest object.  We can't allow that because the buffer must
254   // look like it's full of objects when we retire it, so we make
255   // sure we have enough space for a filler int array object.
256   size_t min_size = min_dummy_object_size();
257   _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
258 
259   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
260   const size_t elements_per_word = HeapWordSize / sizeof(jint);
261   int header_size_in_bytes = arrayOopDesc::base_offset_in_bytes(T_INT);
262   assert(header_size_in_bytes % sizeof(jint) == 0, "must be aligned to int");
263   int header_size_in_ints = header_size_in_bytes / sizeof(jint);
264   _filler_array_max_size = align_object_size((header_size_in_ints + max_len) / elements_per_word);
265 
266   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
267   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
268 
269   if (UsePerfData) {
270     EXCEPTION_MARK;
271 
272     // create the gc cause jvmstat counters
273     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
274                              80, GCCause::to_string(_gc_cause), CHECK);
275 
276     _perf_gc_lastcause =
277                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
278                              80, GCCause::to_string(_gc_lastcause), CHECK);
279   }
280 
281   // Create the ring log
282   if (LogEvents) {
283     _gc_heap_log = new GCHeapLog();
284   } else {

394 }
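
To make the unit conversions in the constructor's new _filler_array_max_size
computation concrete: the header is measured in jint slots, added to the
maximum element count, and the total number of jints is converted to heap
words. A standalone check with assumed 64-bit values (all constants below are
illustrative):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t heap_word_size = 8;          // assumed HeapWordSize
      const size_t header_bytes   = 16;         // assumed int-array header size
      const size_t max_len        = 0x7FFFFFF0; // stand-in for max_array_length(T_INT)
      const size_t elems_per_word = heap_word_size / sizeof(int); // 2 on these assumptions

      const size_t header_ints = header_bytes / sizeof(int);      // header in jint slots
      const size_t size_words  = (header_ints + max_len) / elems_per_word;

      // size_words heap words cover the header plus max_len 4-byte elements.
      printf("filler array max size: %zu words (%zu bytes)\n",
             size_words, size_words * heap_word_size);
      return 0;
    }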
395 
396 void CollectedHeap::set_gc_cause(GCCause::Cause v) {
397   if (UsePerfData) {
398     _gc_lastcause = _gc_cause;
399     _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
400     _perf_gc_cause->set_value(GCCause::to_string(v));
401   }
402   _gc_cause = v;
403 }
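
One detail worth noting in set_gc_cause(): the outgoing cause is published
into the lastCause counter before the new value overwrites _gc_cause (the
counters surface through jvmstat as the SUN_GC "cause"/"lastCause" strings).
A minimal sketch of the same shadowing pattern, with hypothetical names:

    #include <cassert>
    #include <string>

    // Hypothetical stand-ins for the two jvmstat string variables.
    struct GcCauseCounters {
      std::string cause     = "No GC";
      std::string lastCause = "No GC";

      // Mirror of set_gc_cause(): shadow the old value before overwriting.
      void set_cause(const std::string& v) {
        lastCause = cause;
        cause     = v;
      }
    };

    int main() {
      GcCauseCounters c;
      c.set_cause("Allocation Failure");
      c.set_cause("System.gc()");
      assert(c.lastCause == "Allocation Failure" && c.cause == "System.gc()");
      return 0;
    }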
404 
405 size_t CollectedHeap::max_tlab_size() const {
406   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
407   // This restriction could be removed by enabling filling with multiple arrays.
408   // If we compute that the reasonable way, as
409   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
410   // we'll overflow on the multiply, so we do the divide first.
411   // We actually lose a little by dividing first,
412   // but that just makes the TLAB somewhat smaller than the biggest array,
413   // which is fine, since we'll be able to fill that.
414   int header_size_in_bytes = typeArrayOopDesc::base_offset_in_bytes(T_INT);
415   assert(header_size_in_bytes % sizeof(jint) == 0, "header size must align to int");
416   size_t max_int_size = header_size_in_bytes / HeapWordSize +
417               sizeof(jint) *
418               ((juint) max_jint / (size_t) HeapWordSize);
419   return align_down(max_int_size, MinObjAlignment);
420 }
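
A worked check of the divide-first trick the comment above describes: done in
a 32-bit type (as with a 32-bit size_t), the multiply wraps, while dividing
first stays in range and only makes the cap slightly smaller, which is safe.
The constants below are illustrative:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t max_jint = 0x7FFFFFFF;
      const uint32_t heap_word_size = 8;   // assumed 64-bit heap word

      // Multiply-first in 32-bit arithmetic wraps: 4 * max_jint mod 2^32.
      uint32_t wrapped = (uint32_t)sizeof(int32_t) * max_jint;  // 0xFFFFFFFC

      // Divide-first stays in range; at most a fraction of a word is lost.
      uint32_t words = (uint32_t)sizeof(int32_t) * (max_jint / heap_word_size);

      printf("multiply-first (wrapped): %u\n", wrapped);
      printf("divide-first (words):     %u\n", words);
      return 0;
    }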
421 
422 size_t CollectedHeap::filler_array_min_size() {
423   int aligned_header_size_words = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
424   return align_object_size(aligned_header_size_words); // align to MinObjAlignment
425 }
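
To see the new rounding with numbers: with an assumed 12-byte base offset (a
plausible value once the header shrinks, used here purely for illustration)
and 8-byte heap words, the header rounds up to two words. A standalone check:

    #include <cassert>
    #include <cstddef>

    // align_up for power-of-two alignments, mirroring the use above.
    static size_t align_up_pow2(size_t x, size_t align) {
      return (x + align - 1) & ~(align - 1);
    }

    int main() {
      const size_t heap_word_size = 8;                 // assumed heap word size
      // Hypothetical 12-byte int-array base offset:
      assert(align_up_pow2(12, heap_word_size) / heap_word_size == 2);
      // A 16-byte base offset is already word-aligned:
      assert(align_up_pow2(16, heap_word_size) / heap_word_size == 2);
      return 0;
    }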
426 
427 void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
428   int payload_start = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
429   Copy::fill_to_words(start + payload_start,
430                       words - payload_start, value);
431 }
432 
433 #ifdef ASSERT
434 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
435 {
436   assert(words >= min_fill_size(), "too small to fill");
437   assert(is_object_aligned(words), "unaligned size");
438 }
439 
440 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
441 {
442   if (ZapFillerObjects && zap) {
443     zap_filler_array_with(start, words, 0xDEAFBABE);
444   }
445 }
446 #endif // ASSERT
447 
448 void
449 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
450 {
451   assert(words >= filler_array_min_size(), "too small for an array");
452   assert(words <= filler_array_max_size(), "too big for a single object");
453 
454   const size_t payload_size_bytes = words * HeapWordSize - arrayOopDesc::base_offset_in_bytes(T_INT);
455   assert(payload_size_bytes % sizeof(jint) == 0, "must be int aligned");
456   const size_t len = payload_size_bytes / sizeof(jint);
457   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
458 
459   ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
460   allocator.initialize(start);
461   if (DumpSharedSpaces) {
462     // This array is written into the CDS archive. Make sure it
463     // has deterministic contents.
464     zap_filler_array_with(start, words, 0);
465   } else {
466     DEBUG_ONLY(zap_filler_array(start, words, zap);)
467   }
468 }
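
A standalone check of the byte-based length computation in fill_with_array(),
using assumed 64-bit constants; it exercises the two properties the asserts
rely on (the payload splits evenly into jints, and the element count fits the
array length field):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const size_t heap_word_size = 8;           // assumed HeapWordSize
      const size_t base_offset    = 16;          // assumed int-array base offset
      const size_t words          = 0x3FFFFFFA;  // illustrative size near the cap

      // Mirrors: payload_size_bytes = words * HeapWordSize - base_offset
      size_t payload_bytes = words * heap_word_size - base_offset;
      assert(payload_bytes % sizeof(int32_t) == 0);  // splits evenly into jints

      size_t len = payload_bytes / sizeof(int32_t);
      assert(len <= (size_t)INT32_MAX);              // fits the jint length field
      return 0;
    }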
469 
470 void
471 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
472 {
473   assert(words <= filler_array_max_size(), "too big for a single object");
474 
475   if (words >= filler_array_min_size()) {
476     fill_with_array(start, words, zap);