}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  return true;
}
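
// Illustrative use (not from this file): debugging code can apply this as a
// cheap plausibility filter on a suspected oop; the checks are necessary but
// not sufficient conditions for a valid object.
//
//   oop candidate = ...;  // hypothetical value under test
//   bool plausible = Universe::heap()->is_oop(candidate);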

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  int header_size_in_bytes = arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "must be aligned to int");
  int header_size_in_ints = header_size_in_bytes / sizeof(jint);
  _filler_array_max_size = align_object_size((header_size_in_ints + max_len) / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = nullptr;
  }
}
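
// Worked example of the sizing above (illustrative, not from the original
// source): on a 64-bit VM with 8-byte HeapWords, a 16-byte T_INT base offset
// and max_len == 0x7fffffff, elements_per_word == 2 and
// header_size_in_ints == 4, so the largest filler array spans
// (4 + 0x7fffffff) / 2 == 0x40000001 heap words (about 8 GiB) before
// object-size alignment. Actual values vary with the header layout.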

#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch between size (in 32/64-bit heap words) and ju_addr,
    // which always points to a 32-bit word.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT
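
// The invariant checked above, for reference (explanatory note, not original
// source): debug builds zap unused heap with badHeapWordVal (0xBAADBABE), so
// memory about to be handed out must still read as that pattern in every
// 32-bit slot. A minimal standalone sketch of the same check (names and
// sizes illustrative, not HotSpot API):
//
//   const uint32_t zap = 0xBAADBABE;
//   uint32_t* p = reinterpret_cast<uint32_t*>(mem);
//   for (size_t i = 0; i < byte_size / sizeof(uint32_t); ++i) {
//     assert(p[i] == zap && "unzapped word in supposedly-unused memory");
//   }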

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  int header_size_in_bytes = typeArrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "header size must align to int");
  size_t max_int_size = header_size_in_bytes / HeapWordSize +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}
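
// Worked example of the divide-first trick (illustrative, not original
// source): on a 32-bit VM, sizeof(jint) * max_jint == 4 * 0x7fffffff ==
// 0x1fffffffc, which overflows a 32-bit size_t; dividing first gives
// max_jint / HeapWordSize == 0x7fffffff / 4 == 0x1fffffff, and the
// subsequent multiply by sizeof(jint) stays in range. The truncating divide
// can undercount by at most a word's worth of elements, which only makes
// the TLAB cap slightly smaller than the largest fillable int array.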

size_t CollectedHeap::filler_array_min_size() {
  int aligned_header_size_words = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
  return align_object_size(aligned_header_size_words); // align to MinObjAlignment
}
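
// Example (illustrative, assuming a 16-byte T_INT base offset and 8-byte
// HeapWords): align_up(16, 8) / 8 == 2 header words, so the smallest filler
// array needs 2 heap words before MinObjAlignment rounding.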

void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
  int payload_start = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
  Copy::fill_to_words(start + payload_start,
                      words - payload_start, value);
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    zap_filler_array_with(start, words, 0XDEAFBABE);
  }
}
#endif // ASSERT
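
// Note (added for clarity): 0XDEAFBABE marks filler-array payloads so stray
// reads of dead space stand out in a debugger, analogous to badHeapWordVal
// (0xBAADBABE) for unused heap. Only the payload words past the array header
// are overwritten, so the filler remains walkable as a valid int array.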

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size_bytes = words * HeapWordSize - arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(payload_size_bytes % sizeof(jint) == 0, "must be int aligned");
  const size_t len = payload_size_bytes / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  if (DumpSharedSpaces) {
    // This array is written into the CDS archive. Make sure it
    // has deterministic contents.
    zap_filler_array_with(start, words, 0);
  } else {
    DEBUG_ONLY(zap_filler_array(start, words, zap);)
  }
}
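
// Worked example (illustrative, assuming a 16-byte T_INT base offset and
// 8-byte HeapWords): filling 10 heap words gives payload_size_bytes ==
// 10 * 8 - 16 == 64, so len == 16 and the filler is laid down as an int[16];
// the header plus 16 jints exactly covers the requested 80 bytes.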

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);