}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  // If the minimum object size is greater than MinObjAlignment, we can
  // end up with a shard at the end of the buffer that's smaller than
  // the smallest object. We can't allow that because the buffer must
  // look like it's full of objects when we retire it, so we make
  // sure we have enough space for a filler int array object.
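  // As a rough illustration (the exact numbers depend on the build): with
  // 8-byte heap words, MinObjAlignment of 1 word and min_dummy_object_size()
  // of 2 words, 2 > 1 and the reserve becomes align_object_size(2) = 2 words,
  // so each LAB keeps enough tail space for a minimal filler object. A
  // collector whose minimum dummy object fits within MinObjAlignment keeps
  // the reserve at 0.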
  size_t min_size = min_dummy_object_size();
  _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;

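  // Largest filler array, in words: (header ints + element count) / ints per
  // word. As a ballpark, assuming a 16-byte (4-int) array header and 8-byte
  // heap words (2 ints per word), this is about (4 + 2^31) / 2 words, i.e.
  // roughly 2^30 words or 8 GB.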
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  int header_size_in_bytes = arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "must be aligned to int");
  int header_size_in_ints = header_size_in_bytes / sizeof(jint);
  _filler_array_max_size = align_object_size((header_size_in_ints + max_len) / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = nullptr;
  }
}

#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch between size (in 32/64-bit heap words) and ju_addr,
    // which always points to a 32-bit word.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
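  // Worked example (numbers assume 8-byte heap words and a 16-byte int-array
  // header): max_jint / HeapWordSize = 268435455, times sizeof(jint) gives
  // 1073741820 words of payload (about 8 GB), plus 2 header words. Doing the
  // multiply first, sizeof(jint) * max_jint = 8589934588, would not fit in
  // 32-bit arithmetic.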
  int header_size_in_bytes = typeArrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "header size must align to int");
  size_t max_int_size = header_size_in_bytes / HeapWordSize +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

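// Smallest filler array: just the word-aligned header. For instance, assuming
// a 16-byte int-array header and 8-byte heap words, the header occupies 2
// words, which align_object_size() then rounds up to the object alignment if
// needed.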
size_t CollectedHeap::filler_array_min_size() {
  int aligned_header_size_words = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
  return align_object_size(aligned_header_size_words); // align to MinObjAlignment
}

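// Fill the payload of a filler array (everything past the word-aligned header)
// with the given 32-bit pattern; the header words themselves are left intact.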
void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
  int payload_start = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
  Copy::fill_to_words(start + payload_start,
                      words - payload_start, value);
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    zap_filler_array_with(start, words, 0XDEAFBABE);
  }
}
#endif // ASSERT

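// Format [start, start + words) as a single int[] filler object. As a quick
// check of the length math, assuming 8-byte heap words and a 16-byte header:
// filling 1024 words gives payload_size_bytes = 1024 * 8 - 16 = 8176, so
// len = 8176 / 4 = 2044 array elements.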
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size_bytes = words * HeapWordSize - arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(payload_size_bytes % sizeof(jint) == 0, "must be int aligned");
  const size_t len = payload_size_bytes / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  if (DumpSharedSpaces) {
    // This array is written into the CDS archive. Make sure it
    // has deterministic contents.
    zap_filler_array_with(start, words, 0);
  } else {
    DEBUG_ONLY(zap_filler_array(start, words, zap);)
  }
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);