src/hotspot/share/gc/shared/collectedHeap.cpp

191 }
192 
193 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
194   trace_heap(GCWhen::AfterGC, gc_tracer);
195 }
196 
197 // Default implementation, for collectors that don't support the feature.
198 bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
199   return false;
200 }
201 
202 bool CollectedHeap::is_oop(oop object) const {
203   if (!is_object_aligned(object)) {
204     return false;
205   }
206 
207   if (!is_in(object)) {
208     return false;
209   }
210 
211   if (is_in(object->klass_or_null())) { // Reviewer question: should a NULL klass also be rejected here, i.e. (object->klass_or_null() == NULL || is_in(object->klass_or_null()))? See the sketch after this function.
212     return false;
213   }
214 
215   return true;
216 }
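
A minimal sketch of what is_oop() would look like if the variant raised in the
inline question above were adopted, i.e. also rejecting an object whose Klass*
has not been installed yet. This only illustrates the question; it is not the
code in this patch.

    bool CollectedHeap::is_oop(oop object) const {
      if (!is_object_aligned(object)) {
        return false;
      }

      if (!is_in(object)) {
        return false;
      }

      // Reject both a not-yet-installed (NULL) Klass* and a Klass* that
      // points back into the Java heap (Klass metadata lives outside it).
      if (object->klass_or_null() == NULL || is_in(object->klass_or_null())) {
        return false;
      }

      return true;
    }
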
217 
218 // Memory state functions.
219 
220 
221 CollectedHeap::CollectedHeap() :
222   _capacity_at_last_gc(0),
223   _used_at_last_gc(0),
224   _is_gc_active(false),
225   _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
226   _total_collections(0),
227   _total_full_collections(0),
228   _gc_cause(GCCause::_no_gc),
229   _gc_lastcause(GCCause::_no_gc)
230 {
231   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));

251   // Create the ring log
252   if (LogEvents) {
253     _gc_heap_log = new GCHeapLog();
254   } else {
255     _gc_heap_log = NULL;
256   }
257 }
258 
259 // This interface assumes that it's being called by the
260 // vm thread. It collects the heap assuming that the
261 // heap lock is already held and that we are executing in
262 // the context of the vm thread.
263 void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
264   Thread* thread = Thread::current();
265   assert(thread->is_VM_thread(), "Precondition#1");
266   assert(Heap_lock->is_locked(), "Precondition#2");
267   GCCauseSetter gcs(this, cause);
268   switch (cause) {
269     case GCCause::_heap_inspection:
270     case GCCause::_heap_dump:
271     case GCCause::_codecache_GC_threshold:
272     case GCCause::_metadata_GC_threshold : {
273       HandleMark hm(thread);
274       do_full_collection(false);        // don't clear all soft refs
275       break;
276     }
277     case GCCause::_archive_time_gc:
278     case GCCause::_metadata_GC_clear_soft_refs: {
279       HandleMark hm(thread);
280       do_full_collection(true);         // do clear all soft refs
281       break;
282     }
283     default:
284       ShouldNotReachHere(); // Unexpected use of this function
285   }
286 }
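
The two asserts above only check the preconditions; here is a hedged sketch of
how a call site would normally satisfy them. The helper name is hypothetical
and the actual call sites are outside this listing.

    // Runs on the VM thread with Heap_lock already held, e.g. from within a
    // VM operation whose prologue has taken the lock before the safepoint.
    void request_codecache_full_gc_on_vm_thread() {   // hypothetical name
      assert(Thread::current()->is_VM_thread(), "must run on the VM thread");
      assert(Heap_lock->is_locked(), "Heap_lock must already be held");
      // Uses the cause added by this change; per the switch above, soft
      // references are not cleared for this cause.
      Universe::heap()->collect_as_vm_thread(GCCause::_codecache_GC_threshold);
    }
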
287 
288 MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
289                                                             size_t word_size,
290                                                             Metaspace::MetadataType mdtype) {
291   uint loop_count = 0;

342     VMThread::execute(&op);
343 
344     // If GC was locked out, try again. Check before checking success because the
345     // prologue could have succeeded and the GC still have been locked out.
346     if (op.gc_locked()) {
347       continue;
348     }
349 
350     if (op.prologue_succeeded()) {
351       return op.result();
352     }
353     loop_count++;
354     if ((QueuedAllocationWarningCount > 0) &&
355         (loop_count % QueuedAllocationWarningCount == 0)) {
356       log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
357                             " size=" SIZE_FORMAT, loop_count, word_size);
358     }
359   } while (true);  // Until a GC is done
360 }
361 
362 void CollectedHeap::collect_for_codecache() {
363   uint loop_count = 0;
364   uint gc_count = 0;
365   uint full_gc_count = 0;
366 
367   assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
368 
369   do {
370     if (GCLocker::is_active_and_needs_gc()) {
371       // The GCLocker is active and a GC is already pending.
372       // Wait for it unless this thread is itself in a
373       // JNI critical section.
374       JavaThread* jthr = JavaThread::current();
375       if (!jthr->in_critical()) {
376         // Wait for JNI critical section to be exited
377         GCLocker::stall_until_clear();
378         // The GC invoked by the last thread leaving the critical
379         // section will be a young collection and a full collection
380         // is (currently) needed for unloading classes so continue
381         // to the next iteration to get a full GC.
382         continue;
383       } else {
384         if (CheckJNICalls) {
385           fatal("Possible deadlock due to allocating while"
386                 " in jni critical section");
387         }
388         return;
389       }
390     }
391 
392     {  // Need lock to get self consistent gc_count's
393       MutexLocker ml(Heap_lock);
394       gc_count      = Universe::heap()->total_collections();
395       full_gc_count = Universe::heap()->total_full_collections();
396     }
397 
398     // Generate a VM operation
399     VM_CollectForCodeCacheAllocation op(gc_count,
400                                         full_gc_count,
401                                         GCCause::_codecache_GC_threshold);
402     VMThread::execute(&op);
403 
404     // If GC was locked out, try again. Check before checking success because the
405     // prologue could have succeeded and the GC still have been locked out.
406     if (op.gc_locked()) {
407       continue;
408     }
409 
410     if (op.prologue_succeeded()) {
411       return;
412     }
413     loop_count++;
414     if ((QueuedAllocationWarningCount > 0) &&
415         (loop_count % QueuedAllocationWarningCount == 0)) {
416       log_warning(gc, ergo)("collect_for_codecache() retries %d times", loop_count);
417     }
418   } while (true);  // Until a GC is done
419 }
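
For contrast with collect_as_vm_thread(), a hedged sketch of the Java-thread
entry point added above. The helper name is hypothetical; the real code cache
trigger is not part of this listing.

    // Called from a JavaThread that must NOT hold Heap_lock; the callee
    // handles GCLocker stalls and retries by issuing
    // VM_CollectForCodeCacheAllocation itself, as shown above.
    void request_codecache_full_gc_from_java_thread() {   // hypothetical name
      assert(!Heap_lock->owned_by_self(), "callee asserts Heap_lock is not held");
      Universe::heap()->collect_for_codecache();
    }
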
420 
421 MemoryUsage CollectedHeap::memory_usage() {
422   return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
423 }
424 
425 void CollectedHeap::set_gc_cause(GCCause::Cause v) {
426   if (UsePerfData) {
427     _gc_lastcause = _gc_cause;
428     _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
429     _perf_gc_cause->set_value(GCCause::to_string(v));
430   }
431   _gc_cause = v;
432 }
433 
434 #ifndef PRODUCT
435 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
436   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
437     // Please note the mismatch between size (in 32/64-bit words) and ju_addr, which always points to a 32-bit word.
438     for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
439       assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
440     }