}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

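  // A Klass lives in metaspace, never in the Java heap; a klass word that
  // points back into the heap marks the object as corrupt or stale.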
  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
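  // With 8-byte heap words elements_per_word is 2, so the cap comes out to
  // roughly max_jint / 2 words plus the aligned array header.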
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    // ... (intervening source elided) ...
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
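  // Concretely, on a 64-bit VM: (juint)max_jint / 8 == 268435455, times
  // sizeof(jint) == 4 gives 1073741820 words -- the ~8 GB footprint of
  // int[Integer.MAX_VALUE] -- plus the header words.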
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

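// Overwrite the filler's payload with a recognizable debug pattern so stray
// reads of reclaimed space stand out in a debugger.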
void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(vmClasses::Object_klass(), words);
    allocator.initialize(start);
  }
}
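
// Illustrative sketch (not part of this file): a collector reclaiming a dead
// range [start, end) keeps the heap parsable by plugging it with fillers.
// fill_with_objects() is the public entry point; it splits ranges larger than
// filler_array_max_size() into multiple filler objects:
//
//   size_t words = pointer_delta(end, start);
//   if (words >= CollectedHeap::min_fill_size()) {
//     CollectedHeap::fill_with_objects(start, words);
//   }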
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
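  // Note: the header size is taken in bytes here because the int payload may
  // begin at an offset that is not a whole number of heap words (e.g. with
  // compact object headers), so the cap is computed at int granularity first.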
  int header_size_in_bytes = arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "must be aligned to int");
  int header_size_in_ints = header_size_in_bytes / sizeof(jint);
  _filler_array_max_size = align_object_size((header_size_in_ints + max_len) / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    // ... (intervening source elided) ...
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  int header_size_in_bytes = typeArrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "header size must align to int");
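  // header_size_in_bytes / HeapWordSize rounds down; undercounting the header
  // only shrinks the cap slightly, which is safe for the same reason as the
  // early divide above.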
  size_t max_int_size = header_size_in_bytes / HeapWordSize +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}
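// Smallest filler array: just the word-aligned header, with no payload.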
size_t CollectedHeap::filler_array_min_size() {
  int aligned_header_size_words = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
  return align_object_size(aligned_header_size_words); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    int payload_start = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
    Copy::fill_to_words(start + payload_start,
                        words - payload_start, 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

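  // Worked example (hypothetical 12-byte header): an 8-word (64-byte) filler
  // leaves 64 - 12 == 52 payload bytes, i.e. an int[13].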
  const size_t payload_size_bytes = words * HeapWordSize - arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(payload_size_bytes % sizeof(jint) == 0, "must be int aligned");
  const size_t len = payload_size_bytes / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
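    // Too small for even a header-only filler array: fall back to a bare
    // java.lang.Object, the smallest possible heap object.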
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(vmClasses::Object_klass(), words);
    allocator.initialize(start);
  }
}