src/hotspot/share/gc/shared/collectedHeap.cpp

Old version:

}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  if (!Metaspace::contains(object->klass_raw())) {
    return false;
  }

  return true;
}

// Memory state functions.

CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_stw_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  // If the minimum object size is greater than MinObjAlignment, we can
[...]
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
  Copy::fill_to_words(start + filler_array_hdr_size(),
                      words - filler_array_hdr_size(), value);
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
[...]

New version:

}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  // With compact headers, we can't safely access the class, due
  // to possibly forwarded objects.
  if (!UseCompactObjectHeaders && !Metaspace::contains(object->klass_raw())) {
    return false;
  }

  return true;
}
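
For context on the new guard: with compact (Lilliput-style) object headers the klass is encoded in the mark word, and during a moving GC a forwarded object's mark word holds the forwarding pointer instead, so decoding a klass from it yields garbage. A minimal standalone sketch of that hazard; the shift, tag, and names below are illustrative assumptions, not HotSpot's actual markWord encoding:

    #include <cassert>
    #include <cstdint>

    // Toy mark word, assuming a layout where the narrow klass id lives in
    // the upper bits (the shift and tag values here are made up).
    struct ToyMarkWord {
      uint64_t bits;
      uint32_t narrow_klass() const { return (uint32_t)(bits >> 43); }
      void forward_to(uint64_t addr) { bits = addr | 0x3; }  // forwarded tag
    };

    int main() {
      ToyMarkWord m = { (uint64_t)0x1234 << 43 };  // live object, klass id 0x1234
      assert(m.narrow_klass() == 0x1234);          // klass is decodable
      m.forward_to(0x7f0000400000ull);             // GC installs forwarding ptr
      assert(m.narrow_klass() != 0x1234);          // klass bits are now garbage
      return 0;
    }

That is why the new condition short-circuits before Metaspace::contains() when UseCompactObjectHeaders is set.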

// Memory state functions.

CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_stw_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  // If the minimum object size is greater than MinObjAlignment, we can
[...]
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

// Returns the header size in words aligned to the requirements of the
// array object type.
static int int_array_header_size() {
  size_t typesize_in_bytes = arrayOopDesc::header_size_in_bytes();
  return (int)align_up(typesize_in_bytes, HeapWordSize)/HeapWordSize;
}
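
To make the rounding concrete, here is a standalone sketch of the same bytes-to-words computation; the header byte sizes tried below are illustrative assumptions, not HotSpot's actual values:

    #include <cstddef>
    #include <cstdio>
    #include <initializer_list>

    // Same rounding as int_array_header_size(): bytes -> whole heap words.
    static size_t round_to_words(size_t bytes, size_t word_size) {
      return (bytes + word_size - 1) / word_size;  // align_up(bytes)/word_size
    }

    int main() {
      const size_t HeapWordSize = 8;  // assumed LP64 heap word
      for (size_t hdr_bytes : {8, 12, 16}) {  // hypothetical header sizes
        printf("%zu-byte header -> %zu words\n",
               hdr_bytes, round_to_words(hdr_bytes, HeapWordSize));
      }
      return 0;
    }

Computing in words rather than bytes lets the callers below share one helper regardless of which header layout is in use.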

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = int_array_header_size() +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}
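
A quick numeric check of the overflow the comment warns about, as a standalone sketch; HeapWordSize is assumed to be 8, as on LP64:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t max_jint = 2147483647u;  // 2^31 - 1
      const size_t HeapWordSize = 8;          // assumed 64-bit heap words

      // Multiply first in 32-bit arithmetic: 4 * (2^31 - 1) = 2^33 - 4,
      // which wraps to 2^32 - 4 = 4294967292 -- the overflow in question.
      uint32_t multiply_first = (uint32_t)sizeof(int32_t) * max_jint;
      printf("multiply first (32-bit): %u\n", multiply_first);

      // Divide first, as max_tlab_size() does: every intermediate fits
      // comfortably, at the cost of rounding the result down slightly.
      size_t divide_first = sizeof(int32_t) * ((size_t)max_jint / HeapWordSize);
      printf("divide first: %zu\n", divide_first);  // 1073741820
      return 0;
    }

The small loss from dividing first only shrinks the TLAB below the largest fillable array, which, as the comment notes, is harmless.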

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(int_array_header_size()); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
  Copy::fill_to_words(start + filler_array_hdr_size(),
                      words - filler_array_hdr_size(), value);
}
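
For a picture of what that fill does, here is a toy standalone version; the 2-word header, region size, and zap pattern are assumptions for illustration, whereas the real code gets the header from filler_array_hdr_size() and fills with Copy::fill_to_words():

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t HeapWord;  // stand-in for HotSpot's HeapWord

    // Toy version of zap_filler_array_with(): skip the header words, then
    // stamp every payload word of the filler object with the zap value.
    static void zap_payload(HeapWord* start, size_t words,
                            size_t hdr_words, uint32_t value) {
      for (size_t i = hdr_words; i < words; i++) {
        start[i] = value;
      }
    }

    int main() {
      HeapWord region[6] = {0};              // pretend 6-word filler object
      zap_payload(region, 6, 2, 0xDEAFBABE); // assumed 2-word header
      for (int i = 0; i < 6; i++) {
        printf("word %d: 0x%08lx%s\n", i, (unsigned long)region[i],
               i < 2 ? "  (header, untouched)" : "");
      }
      return 0;
    }

Zapping only the payload keeps the filler parseable as an object while making stale reads into it easy to spot in a debugger.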

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
[...]