src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/continuationGCSupport.inline.hpp"
 34 #include "gc/shared/suspendibleThreadSet.hpp"
 35 #include "gc/shared/tlab_globals.hpp"
 36 #include "gc/shenandoah/shenandoahAsserts.hpp"
 37 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 40 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 43 #include "gc/shenandoah/shenandoahControlThread.hpp"
 44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 45 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 46 #include "oops/compressedOops.inline.hpp"
 47 #include "oops/oop.inline.hpp"
 48 #include "runtime/atomic.hpp"
 49 #include "runtime/javaThread.hpp"
 50 #include "runtime/prefetch.inline.hpp"
 51 #include "utilities/copy.hpp"
 52 #include "utilities/globalDefinitions.hpp"
 53 
 54 inline ShenandoahHeap* ShenandoahHeap::heap() {
 55   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 56 }
 57 
 58 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 59   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 60   // get_region() provides the bounds-check and returns NULL on OOB.
 61   return _heap->get_region(new_index - 1);
 62 }
 63 
 64 inline bool ShenandoahHeap::has_forwarded_objects() const {

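ShenandoahRegionIterator::next() above parcels out region indices with a single relaxed atomic increment: every caller claims a distinct index, and get_region() turns an out-of-bounds index into NULL so the iterator simply runs dry. Below is a minimal standalone sketch of the same claim-by-increment pattern, written against std::atomic and toy types rather than the HotSpot classes (RegionIterator, Region and drain_regions are illustrative names, not JDK code):

#include <atomic>
#include <cstddef>
#include <vector>

struct Region { size_t id; };

// Toy stand-in for ShenandoahRegionIterator. Workers race on fetch_add;
// every index is claimed by exactly one caller. Relaxed ordering suffices
// because the counter only partitions work, nothing else is published
// through it.
class RegionIterator {
  std::vector<Region>* _regions;
  std::atomic<size_t>  _index{0};
public:
  explicit RegionIterator(std::vector<Region>* regions) : _regions(regions) {}

  Region* next() {
    // std::atomic fetch_add returns the old value; HotSpot's Atomic::add
    // returns the new one, hence the "new_index - 1" in the original.
    size_t claimed = _index.fetch_add(1, std::memory_order_relaxed);
    // The bounds check plays the role of get_region() returning NULL on OOB.
    return claimed < _regions->size() ? &(*_regions)[claimed] : nullptr;
  }
};

// Typical worker drain loop: each thread pulls regions until next() runs dry.
inline void drain_regions(RegionIterator& it) {
  while (Region* r = it.next()) {
    (void)r; // process *r
  }
}
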
280     // No GCLABs in this thread, fallback to shared allocation
281     return NULL;
282   }
283   HeapWord* obj = gclab->allocate(size);
284   if (obj != NULL) {
285     return obj;
286   }
287   // Otherwise...
288   return allocate_from_gclab_slow(thread, size);
289 }
290 
291 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
292   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
293     // This thread went through the OOM during evac protocol and it is safe to return
294     // the forward pointer. It must not attempt to evacuate any more.
295     return ShenandoahBarrierSet::resolve_forwarded(p);
296   }
297 
298   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
299 
300   size_t size = p->size();
301 
302   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
303 
304   bool alloc_from_gclab = true;
305   HeapWord* copy = NULL;
306 
307 #ifdef ASSERT
308   if (ShenandoahOOMDuringEvacALot &&
309       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
310         copy = NULL;
311   } else {
312 #endif
313     if (UseTLAB) {
314       copy = allocate_from_gclab(thread, size);
315     }
316     if (copy == NULL) {
317       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
318       copy = allocate_memory(req);
319       alloc_from_gclab = false;
320     }
321 #ifdef ASSERT
322   }
323 #endif
324 
325   if (copy == NULL) {
326     control_thread()->handle_alloc_failure_evac(size);
327 
328     _oom_evac_handler.handle_out_of_memory_during_evacuation();
329 
330     return ShenandoahBarrierSet::resolve_forwarded(p);
331   }
332 
333   // Copy the object:
334   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
335 
336   // Try to install the new forwarding pointer.
337   oop copy_val = cast_to_oop(copy);
338   ContinuationGCSupport::relativize_stack_chunk(copy_val);
339 
340   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
341   if (result == copy_val) {
342     // Successfully evacuated. Our copy is now the public one!
343     shenandoah_assert_correct(NULL, copy_val);
344     return copy_val;
345   }  else {
346     // Failed to evacuate. We need to deal with the object that is left behind. Since this
347     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
348     // But if it happens to contain references to evacuated regions, those references would
349     // not get updated for this stale copy during this cycle, and we will crash while scanning
350     // it the next cycle.
351     //
352     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
353     // object will overwrite this stale copy, or the filler object on LAB retirement will
354     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
355     // have to explicitly overwrite the copy with the filler object. With that overwrite,
356     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
357     if (alloc_from_gclab) {
358       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
359     } else {

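allocate_from_gclab() above is the usual two-level LAB scheme: try a bump-pointer allocation in the thread's GCLAB, and fall back to a slower shared path when the thread has no GCLAB or the buffer cannot fit the request. A standalone sketch of that shape follows, using toy names (ToyLab, allocate_for_evac, shared_alloc) rather than the HotSpot TLAB machinery:

#include <cstddef>

// Toy bump-pointer buffer standing in for a GCLAB (the thread-local buffer a
// GC worker evacuates into). Illustrative only, not the HotSpot PLAB/TLAB code.
class ToyLab {
  char* _top;
  char* _end;
public:
  ToyLab(char* start, size_t bytes) : _top(start), _end(start + bytes) {}

  // Bump allocation; nullptr when the buffer cannot fit the request,
  // mirroring gclab->allocate(size) returning NULL above.
  void* allocate(size_t bytes) {
    if (static_cast<size_t>(_end - _top) < bytes) return nullptr;
    void* result = _top;
    _top += bytes;
    return result;
  }
};

// Fast path first, shared fallback second -- the same shape as trying the
// GCLAB and then falling through to a shared GC allocation in evacuate_object().
inline void* allocate_for_evac(ToyLab* lab, size_t bytes, void* (*shared_alloc)(size_t)) {
  if (lab != nullptr) {
    if (void* p = lab->allocate(bytes)) {
      return p;
    }
  }
  return shared_alloc(bytes);
}
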
499       assert(oopDesc::is_oop(obj), "sanity");
500       assert(ctx->is_marked(obj), "object expected to be marked");
501       cl->do_object(obj);
502       cb += skip_bitmap_delta;
503       if (cb < limit_bitmap) {
504         cb = ctx->get_next_marked_addr(cb, limit_bitmap);
505       }
506     }
507   }
508 
509   // Step 2. Accurate size-based traversal, happens past the TAMS.
510   // This restarts the scan at TAMS, which makes sure we traverse all objects,
511   // regardless of what happened at Step 1.
512   HeapWord* cs = tams;
513   while (cs < limit) {
514     assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
515     assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
516     oop obj = cast_to_oop(cs);
517     assert(oopDesc::is_oop(obj), "sanity");
518     assert(ctx->is_marked(obj), "object expected to be marked");
519     size_t size = obj->size();
520     cl->do_object(obj);
521     cs += size;
522   }
523 }
524 
525 template <class T>
526 class ShenandoahObjectToOopClosure : public ObjectClosure {
527   T* _cl;
528 public:
529   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
530 
531   void do_object(oop obj) {
532     obj->oop_iterate(_cl);
533   }
534 };
535 
536 template <class T>
537 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
538   T* _cl;
539   MemRegion _bounds;

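marked_object_iterate() above is a two-step scan: below TAMS it trusts the mark bitmap and visits only marked objects, while from TAMS up to the limit it walks every object by its size, since everything allocated past TAMS is implicitly live for this cycle. The following is a much-simplified standalone model of that structure over a toy word-indexed heap (ToyHeap and the linear bitmap probe are illustrative; the real code uses HeapWord addresses and ShenandoahMarkingContext::get_next_marked_addr):

#include <cstddef>
#include <functional>
#include <vector>

// Toy word-indexed heap: an object starting at slot i occupies words[i] slots
// (words[i] > 0 at every object start), and marked[i] is set only at the
// starts of live objects below TAMS.
struct ToyHeap {
  std::vector<size_t> words;
  std::vector<bool>   marked;
};

inline void marked_object_iterate(const ToyHeap& heap, size_t tams, size_t limit,
                                  const std::function<void(size_t)>& visit) {
  // Step 1: bitmap-driven scan below TAMS; visit only marked object starts.
  // (A linear probe stands in for the real bitmap search.)
  size_t cb = 0;
  while (cb < tams) {
    if (heap.marked[cb]) {
      visit(cb);
      cb += heap.words[cb];   // skip over the visited object
    } else {
      ++cb;
    }
  }
  // Step 2: accurate size-based traversal; restart exactly at TAMS so every
  // object past TAMS is visited regardless of what Step 1 covered.
  size_t cs = tams;
  while (cs < limit) {
    visit(cs);
    cs += heap.words[cs];     // advance by the object's size
  }
}
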
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/continuationGCSupport.inline.hpp"
 34 #include "gc/shared/suspendibleThreadSet.hpp"
 35 #include "gc/shared/tlab_globals.hpp"
 36 #include "gc/shenandoah/shenandoahAsserts.hpp"
 37 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 40 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 43 #include "gc/shenandoah/shenandoahControlThread.hpp"
 44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 45 #include "gc/shenandoah/shenandoahObjectUtils.inline.hpp"
 46 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 47 #include "oops/compressedOops.inline.hpp"
 48 #include "oops/oop.inline.hpp"
 49 #include "runtime/atomic.hpp"
 50 #include "runtime/javaThread.hpp"
 51 #include "runtime/prefetch.inline.hpp"
 52 #include "utilities/copy.hpp"
 53 #include "utilities/globalDefinitions.hpp"
 54 
 55 inline ShenandoahHeap* ShenandoahHeap::heap() {
 56   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 57 }
 58 
 59 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 60   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 61   // get_region() provides the bounds-check and returns NULL on OOB.
 62   return _heap->get_region(new_index - 1);
 63 }
 64 
 65 inline bool ShenandoahHeap::has_forwarded_objects() const {

281     // No GCLABs in this thread, fallback to shared allocation
282     return NULL;
283   }
284   HeapWord* obj = gclab->allocate(size);
285   if (obj != NULL) {
286     return obj;
287   }
288   // Otherwise...
289   return allocate_from_gclab_slow(thread, size);
290 }
291 
292 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
293   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
294     // This thread went through the OOM during evac protocol and it is safe to return
295     // the forward pointer. It must not attempt to evacuate any more.
296     return ShenandoahBarrierSet::resolve_forwarded(p);
297   }
298 
299   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
300 
301   size_t size = ShenandoahObjectUtils::size(p);
302 
303   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
304 
305   bool alloc_from_gclab = true;
306   HeapWord* copy = NULL;
307 
308 #ifdef ASSERT
309   if (ShenandoahOOMDuringEvacALot &&
310       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
311         copy = NULL;
312   } else {
313 #endif
314     if (UseTLAB) {
315       copy = allocate_from_gclab(thread, size);
316     }
317     if (copy == NULL) {
318       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
319       copy = allocate_memory(req);
320       alloc_from_gclab = false;
321     }
322 #ifdef ASSERT
323   }
324 #endif
325 
326   if (copy == NULL) {
327     control_thread()->handle_alloc_failure_evac(size);
328 
329     _oom_evac_handler.handle_out_of_memory_during_evacuation();
330 
331     return ShenandoahBarrierSet::resolve_forwarded(p);
332   }
333 
334   // Copy the object:
335   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
336 
337   // Try to install the new forwarding pointer.
338   oop copy_val = cast_to_oop(copy);
339   if (!copy_val->mark().is_marked()) {
340     // If we copied a mark-word that indicates 'forwarded' state, then
341     // another thread beat us, and this new copy will never be published.
342     // ContinuationGCSupport would get a corrupt Klass* in that case,
343     // so don't even attempt it.
344     ContinuationGCSupport::relativize_stack_chunk(copy_val);
345   }
346   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
347   if (result == copy_val) {
348     // Successfully evacuated. Our copy is now the public one!
349     shenandoah_assert_correct(NULL, copy_val);
350     return copy_val;
351   }  else {
352     // Failed to evacuate. We need to deal with the object that is left behind. Since this
353     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
354     // But if it happens to contain references to evacuated regions, those references would
355     // not get updated for this stale copy during this cycle, and we will crash while scanning
356     // it the next cycle.
357     //
358     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
359     // object will overwrite this stale copy, or the filler object on LAB retirement will
360     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
361     // have to explicitly overwrite the copy with the filler object. With that overwrite,
362     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
363     if (alloc_from_gclab) {
364       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
365     } else {

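The core of evacuate_object() is copy-then-publish: each racing thread copies the object into its own allocation and then tries to CAS in the forwarding pointer; only the winner's copy becomes the canonical one, while a loser rolls back its GCLAB bump or overwrites its copy with a filler. The new mark-word check before relativize_stack_chunk() above follows from the same race: per the comment, a copy whose header already reads "forwarded" lost the race and must not be used to derive metadata. The sketch below models only the publish race, with a std::atomic pointer standing in for the forwardee slot (Object, evacuate and the field names are illustrative, not the ShenandoahForwarding API):

#include <atomic>
#include <cstddef>
#include <cstring>

// Toy evacuation source/target. The atomic 'forwardee' field stands in for
// the forwarding pointer that ShenandoahForwarding::try_update_forwardee()
// installs; the real code keeps it in the object's mark word.
struct Object {
  std::atomic<Object*> forwardee{nullptr};
  size_t               length;            // payload bytes in use (<= 64)
  unsigned char        payload[64];
};

// Returns the canonical copy of 'from': our own copy if we won the publish
// race, or the winner's copy if another thread installed its pointer first.
inline Object* evacuate(Object* from, Object* copy /* pre-allocated space */) {
  // Copy first; the copy stays strictly private until the CAS below succeeds.
  copy->length = from->length;
  std::memcpy(copy->payload, from->payload, from->length);

  Object* expected = nullptr;
  if (from->forwardee.compare_exchange_strong(expected, copy,
                                              std::memory_order_acq_rel,
                                              std::memory_order_acquire)) {
    return copy;        // we won: our copy is now the public one
  }
  // Lost the race: 'expected' now holds the winner's copy. In the real code a
  // GCLAB allocation is rolled back (undo_allocation) and a shared allocation
  // is overwritten with a filler object, keeping the fwdptr on the stale copy.
  return expected;
}
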
505       assert(oopDesc::is_oop(obj), "sanity");
506       assert(ctx->is_marked(obj), "object expected to be marked");
507       cl->do_object(obj);
508       cb += skip_bitmap_delta;
509       if (cb < limit_bitmap) {
510         cb = ctx->get_next_marked_addr(cb, limit_bitmap);
511       }
512     }
513   }
514 
515   // Step 2. Accurate size-based traversal, happens past the TAMS.
516   // This restarts the scan at TAMS, which makes sure we traverse all objects,
517   // regardless of what happened at Step 1.
518   HeapWord* cs = tams;
519   while (cs < limit) {
520     assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
521     assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
522     oop obj = cast_to_oop(cs);
523     assert(oopDesc::is_oop(obj), "sanity");
524     assert(ctx->is_marked(obj), "object expected to be marked");
525     size_t size = ShenandoahObjectUtils::size(obj);
526     cl->do_object(obj);
527     cs += size;
528   }
529 }
530 
531 template <class T>
532 class ShenandoahObjectToOopClosure : public ObjectClosure {
533   T* _cl;
534 public:
535   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
536 
537   void do_object(oop obj) {
538     obj->oop_iterate(_cl);
539   }
540 };
541 
542 template <class T>
543 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
544   T* _cl;
545   MemRegion _bounds;