
--- old/src/hotspot/share/gc/shared/collectedHeap.cpp

191 }
192 
193 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
194   trace_heap(GCWhen::AfterGC, gc_tracer);
195 }
196 
197 // Default implementation, for collectors that don't support the feature.
198 bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
199   return false;
200 }
201 
202 bool CollectedHeap::is_oop(oop object) const {
203   if (!is_object_aligned(object)) {
204     return false;
205   }
206 
207   if (!is_in(object)) {
208     return false;
209   }
210 
211   if (is_in(object->klass_or_null())) {
212     return false;
213   }
214 
215   return true;
216 }
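
A side note on the klass_or_null() test above (it is dropped in the new version further down): Klass* metadata lives in metaspace, never inside the Java heap, so a candidate object whose klass word points back into the heap cannot be a valid oop. A minimal standalone sketch of that heuristic, with invented address ranges, assuming a 64-bit build:

    // Standalone sketch, not HotSpot code: the heap-range heuristic behind
    // the klass_or_null() check. Address ranges here are invented.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t heap_lo = 0x700000000000, heap_hi = 0x700040000000;
      auto in_heap = [&](uintptr_t p) { return p >= heap_lo && p < heap_hi; };

      uintptr_t metaspace_klass = 0x7f0000001000;  // plausible Klass*: outside the heap
      uintptr_t garbage_word    = 0x700000abc120;  // points into the heap: not a Klass*

      printf("metaspace klass rejected? %d\n", in_heap(metaspace_klass));  // 0
      printf("garbage word rejected?    %d\n", in_heap(garbage_word));     // 1
      return 0;
    }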
217 
218 // Memory state functions.
219 
220 
221 CollectedHeap::CollectedHeap() :
222   _capacity_at_last_gc(0),
223   _used_at_last_gc(0),
224   _is_gc_active(false),
225   _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
226   _total_collections(0),
227   _total_full_collections(0),
228   _gc_cause(GCCause::_no_gc),
229   _gc_lastcause(GCCause::_no_gc)
230 {
231   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
232   const size_t elements_per_word = HeapWordSize / sizeof(jint);
233   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
234                                              max_len / elements_per_word);
235 
236   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
237   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
238 
239   if (UsePerfData) {
240     EXCEPTION_MARK;
241 
242     // create the gc cause jvmstat counters
243     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
244                              80, GCCause::to_string(_gc_cause), CHECK);
245 
246     _perf_gc_lastcause =
247                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
248                              80, GCCause::to_string(_gc_lastcause), CHECK);
249   }
250 
251   // Create the ring log
252   if (LogEvents) {
253     _gc_heap_log = new GCHeapLog();
254   } else {

374 #ifndef PRODUCT
375 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
376   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
377     // note the mismatch between size (in 32/64-bit words) and ju_addr, which always points to a 32-bit word
378     for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
379       assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
380     }
381   }
382 }
383 #endif // PRODUCT
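
The word-size mismatch the comment warns about is easy to trip over when touching this check, so here is the same scan shape as a standalone sketch. It assumes a 64-bit HeapWord and that badHeapWordVal is the usual 0xBAADBABE zap pattern:

    // Standalone sketch, not HotSpot code: the range length is measured in
    // heap words, but the scan pointer advances one 32-bit word at a time.
    // The zap pattern is assumed to be badHeapWordVal == 0xBAADBABE.
    #include <cstdint>
    #include <cassert>

    int main() {
      const uint32_t bad = 0xBAADBABE;
      uint64_t range[4];                               // "size" == 4 heap words (64-bit VM)
      uint32_t* lo = reinterpret_cast<uint32_t*>(range);
      uint32_t* hi = reinterpret_cast<uint32_t*>(range + 4);
      for (uint32_t* p = lo; p < hi; ++p) *p = bad;    // zap in 32-bit steps
      for (uint32_t* p = lo; p < hi; ++p) assert(*p == bad);  // same scan shape as above
      return 0;
    }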
384 
385 size_t CollectedHeap::max_tlab_size() const {
386   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
387   // This restriction could be removed by enabling filling with multiple arrays.
388   // If we compute that the reasonable way as
389   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
390   // we'll overflow on the multiply, so we do the divide first.
391   // We actually lose a little by dividing first,
392   // but that just makes the TLAB somewhat smaller than the biggest array,
393   // which is fine, since we'll be able to fill that.
394   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
395               sizeof(jint) *
396               ((juint) max_jint / (size_t) HeapWordSize);
397   return align_down(max_int_size, MinObjAlignment);
398 }
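
The divide-first trick in the comment is worth a concrete number check. A standalone sketch using ILP32 values, where the naive order really does wrap:

    // Standalone sketch, not HotSpot code: why the divide happens first.
    // Values model an ILP32 VM: sizeof(jint) == 4, HeapWordSize == 4,
    // 32-bit size_t. uint64_t stands in for "exact" arithmetic.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t max_jint = 2147483647;  // Integer.MAX_VALUE
      const uint64_t jint_sz = 4, word = 4;

      uint64_t exact = jint_sz * max_jint / word;        // 2147483647 words
      uint32_t naive = (uint32_t)(jint_sz * max_jint);   // 8589934588 wraps to 4294967292
      uint64_t safe  = jint_sz * (max_jint / word);      // 2147483644 words: stays in range

      printf("exact: %llu, wrapped multiply: %u, divide-first: %llu (loses %llu words)\n",
             (unsigned long long)exact, naive,
             (unsigned long long)safe, (unsigned long long)(exact - safe));
      return 0;
    }

Three heap words of TLAB capacity is the cost, which the comment accepts as fine.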
399 
400 size_t CollectedHeap::filler_array_hdr_size() {
401   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
402 }
403 
404 size_t CollectedHeap::filler_array_min_size() {
405   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
406 }
407 
408 #ifdef ASSERT
409 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
410 {
411   assert(words >= min_fill_size(), "too small to fill");
412   assert(is_object_aligned(words), "unaligned size");
413 }
414 
415 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
416 {
417   if (ZapFillerObjects && zap) {
418     Copy::fill_to_words(start + filler_array_hdr_size(),
419                         words - filler_array_hdr_size(), 0XDEAFBABE);
420   }
421 }
422 #endif // ASSERT
423 
424 void
425 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
426 {
427   assert(words >= filler_array_min_size(), "too small for an array");
428   assert(words <= filler_array_max_size(), "too big for a single object");
429 
430   const size_t payload_size = words - filler_array_hdr_size();
431   const size_t len = payload_size * HeapWordSize / sizeof(jint);
432   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
433 
434   ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
435   allocator.initialize(start);
436   DEBUG_ONLY(zap_filler_array(start, words, zap);)
437 }
438 
439 void
440 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
441 {
442   assert(words <= filler_array_max_size(), "too big for a single object");
443 
444   if (words >= filler_array_min_size()) {
445     fill_with_array(start, words, zap);
446   } else if (words > 0) {
447     assert(words == min_fill_size(), "unaligned size");
448     ObjAllocator allocator(vmClasses::Object_klass(), words);
449     allocator.initialize(start);
450   }
451 }

+++ new/src/hotspot/share/gc/shared/collectedHeap.cpp

191 }
192 
193 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
194   trace_heap(GCWhen::AfterGC, gc_tracer);
195 }
196 
197 // Default implementation, for collectors that don't support the feature.
198 bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
199   return false;
200 }
201 
202 bool CollectedHeap::is_oop(oop object) const {
203   if (!is_object_aligned(object)) {
204     return false;
205   }
206 
207   if (!is_in(object)) {
208     return false;
209   }
210 
211   return true;
212 }
213 
214 // Memory state functions.
215 
216 
217 CollectedHeap::CollectedHeap() :
218   _capacity_at_last_gc(0),
219   _used_at_last_gc(0),
220   _is_gc_active(false),
221   _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
222   _total_collections(0),
223   _total_full_collections(0),
224   _gc_cause(GCCause::_no_gc),
225   _gc_lastcause(GCCause::_no_gc)
226 {
227   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
228   const size_t elements_per_word = HeapWordSize / sizeof(jint);
229   int header_size_in_bytes = arrayOopDesc::base_offset_in_bytes(T_INT);
230   assert(header_size_in_bytes % sizeof(jint) == 0, "must be aligned to int");
231   int header_size_in_ints = header_size_in_bytes / sizeof(jint);
232   _filler_array_max_size = align_object_size((header_size_in_ints + max_len) / elements_per_word);
233 
234   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
235   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
236 
237   if (UsePerfData) {
238     EXCEPTION_MARK;
239 
240     // create the gc cause jvmstat counters
241     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
242                              80, GCCause::to_string(_gc_cause), CHECK);
243 
244     _perf_gc_lastcause =
245                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
246                              80, GCCause::to_string(_gc_lastcause), CHECK);
247   }
248 
249   // Create the ring log
250   if (LogEvents) {
251     _gc_heap_log = new GCHeapLog();
252   } else {

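Plugging assumed 64-bit numbers into the new _filler_array_max_size computation above makes the unit conversions easier to follow. A standalone sketch (the 16-byte int-array payload offset and max_len == 2^31-1 are assumptions, not what every platform produces):

    // Standalone sketch, not HotSpot code: the new _filler_array_max_size
    // arithmetic with assumed 64-bit values. align_object_size() is a no-op
    // here because MinObjAlignment is 1 word on this assumed layout.
    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t max_len = 2147483647;          // assumed max int[] length
      const size_t elements_per_word = 8 / 4;     // HeapWordSize / sizeof(jint)
      const size_t header_size_in_ints = 16 / 4;  // assumed 16-byte payload offset

      size_t max_size = (header_size_in_ints + max_len) / elements_per_word;
      printf("filler array max size: %zu heap words (~%zu GB)\n",
             max_size, max_size * 8 >> 30);
      return 0;
    }
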
372 #ifndef PRODUCT
373 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
374   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
375     // note the mismatch between size (in 32/64-bit words) and ju_addr, which always points to a 32-bit word
376     for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
377       assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
378     }
379   }
380 }
381 #endif // PRODUCT
382 
383 size_t CollectedHeap::max_tlab_size() const {
384   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
385   // This restriction could be removed by enabling filling with multiple arrays.
386   // If we compute that the reasonable way as
387   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
388   // we'll overflow on the multiply, so we do the divide first.
389   // We actually lose a little by dividing first,
390   // but that just makes the TLAB somewhat smaller than the biggest array,
391   // which is fine, since we'll be able to fill that.
392   int header_size_in_bytes = typeArrayOopDesc::base_offset_in_bytes(T_INT);
393   assert(header_size_in_bytes % sizeof(jint) == 0, "header size must align to int");
394   size_t max_int_size = header_size_in_bytes / HeapWordSize +
395               sizeof(jint) *
396               ((juint) max_jint / (size_t) HeapWordSize);
397   return align_down(max_int_size, MinObjAlignment);
398 }
399 
400 size_t CollectedHeap::filler_array_min_size() {
401   int aligned_header_size_words = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
402   return align_object_size(aligned_header_size_words); // align to MinObjAlignment
403 }
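
Both this function and zap_filler_array() below now convert the byte offset from base_offset_in_bytes() into whole heap words by rounding up. A standalone sketch of that conversion (the 12-byte offset models a hypothetical compact-header layout; 16 bytes is the more familiar one):

    // Standalone sketch, not HotSpot code: align_up(offset, HeapWordSize) /
    // HeapWordSize, i.e. the first whole heap word at or after the payload.
    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t heap_word = 8;             // 64-bit HeapWordSize
      const size_t offsets[] = {12, 16, 20};  // assumed example payload offsets
      for (size_t off : offsets) {
        size_t words = (off + heap_word - 1) / heap_word;  // align_up in words
        printf("payload at %zu bytes -> header occupies %zu word(s)\n", off, words);
      }
      return 0;
    }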
404 
405 #ifdef ASSERT
406 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
407 {
408   assert(words >= min_fill_size(), "too small to fill");
409   assert(is_object_aligned(words), "unaligned size");
410 }
411 
412 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
413 {
414   if (ZapFillerObjects && zap) {
415     int payload_start = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
416     Copy::fill_to_words(start + payload_start,
417                         words - payload_start, 0XDEAFBABE);
418   }
419 }
420 #endif // ASSERT
421 
422 void
423 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
424 {
425   assert(words >= filler_array_min_size(), "too small for an array");
426   assert(words <= filler_array_max_size(), "too big for a single object");
427 
428   const size_t payload_size_bytes = words * HeapWordSize - arrayOopDesc::base_offset_in_bytes(T_INT);
429   assert(payload_size_bytes % sizeof(jint) == 0, "must be int aligned");
430   const size_t len = payload_size_bytes / sizeof(jint);
431   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
432 
433   ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
434   allocator.initialize(start);
435   DEBUG_ONLY(zap_filler_array(start, words, zap);)
436 }
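
A quick round-trip check of the new length computation, as a standalone sketch: for a gap of `words` heap words, the chosen int[] length fills the gap exactly. The 16-byte payload offset is again an assumed example value:

    // Standalone sketch, not HotSpot code: the filler int[] length chosen by
    // fill_with_array(), round-tripped back to bytes. base_offset is an
    // assumed example value for arrayOopDesc::base_offset_in_bytes(T_INT).
    #include <cstdio>
    #include <cstddef>
    #include <cassert>

    int main() {
      const size_t heap_word = 8, base_offset = 16, jint_sz = 4;
      for (size_t words = 2; words <= 5; words++) {
        size_t payload_bytes = words * heap_word - base_offset;
        assert(payload_bytes % jint_sz == 0);                      // mirrors the new assert
        size_t len = payload_bytes / jint_sz;
        assert(base_offset + len * jint_sz == words * heap_word);  // fills the gap exactly
        printf("gap of %zu words -> int[%zu]\n", words, len);
      }
      return 0;
    }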
437 
438 void
439 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
440 {
441   assert(words <= filler_array_max_size(), "too big for a single object");
442 
443   if (words >= filler_array_min_size()) {
444     fill_with_array(start, words, zap);
445   } else if (words > 0) {
446     assert(words == min_fill_size(), "unaligned size");
447     ObjAllocator allocator(vmClasses::Object_klass(), words);
448     allocator.initialize(start);
449   }
450 }
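
This dispatch is the policy summary of the whole filler machinery. A standalone sketch with assumed sizes (min fill of 2 words for a bare java.lang.Object, and a hypothetical 3-word array-filler minimum so the Object branch is actually reachable):

    // Standalone sketch, not HotSpot code: which filler fill_with_object_impl()
    // picks for a given gap. All thresholds are assumed example values.
    #include <cstdio>
    #include <cstddef>

    const size_t min_fill = 2;          // header-only java.lang.Object, in words
    const size_t array_min = 3;         // hypothetical filler_array_min_size()
    const size_t array_max = 1u << 30;  // hypothetical filler_array_max_size()

    const char* filler_for(size_t words) {
      if (words >= array_min && words <= array_max) return "int[] filler";
      if (words > 0) return "plain Object filler";   // only legal at min_fill
      return "nothing";
    }

    int main() {
      const size_t gaps[] = {0, 2, 3, 1000};
      for (size_t w : gaps) printf("%4zu words -> %s\n", w, filler_for(w));
      return 0;
    }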