src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

Old version:

  99 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 100   T o = RawAccess<>::oop_load(p);
 101   if (!CompressedOops::is_null(o)) {
 102     oop obj = CompressedOops::decode_not_null(o);
 103     return maybe_update_with_forwarded_not_null(p, obj);
 104   } else {
 105     return NULL;
 106   }
 107 }
 108 
 109 template <class T>
 110 inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
 111   T o = RawAccess<>::oop_load(p);
 112   if (!CompressedOops::is_null(o)) {
 113     oop heap_oop = CompressedOops::decode_not_null(o);
 114     if (in_collection_set(heap_oop)) {
 115       oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 116       if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
 117         forwarded_oop = evacuate_object(heap_oop, Thread::current());
 118       }
 119       oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 120       if (oopDesc::equals_raw(prev, heap_oop)) {
 121         return forwarded_oop;
 122       } else {
 123         return NULL;
 124       }
 125     }
 126     return heap_oop;
 127   } else {
 128     return NULL;
 129   }
 130 }
 131 
 132 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
 133   return (oop) Atomic::cmpxchg(n, addr, c);
 134 }
 135 
 136 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
 137   narrowOop cmp = CompressedOops::encode(c);
 138   narrowOop val = CompressedOops::encode(n);
 139   return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
 140 }
 141 
 142 template <class T>
 143 inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
 144   shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 145   shenandoah_assert_correct(p, heap_oop);
 146 
 147   if (in_collection_set(heap_oop)) {
 148     oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 149     if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
 150       // E.g. during evacuation.
 151       return forwarded_oop;
 152     }
 153 
 154     shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 155     shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());
 156 
 157     // If this fails, another thread wrote to p before us: the write will be logged in
 158     // the SATB queue, and the reference will be updated later.
 159     oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 160 
 161     if (oopDesc::equals_raw(result, heap_oop)) { // CAS successful.
 162       return forwarded_oop;
 163     } else {
 164       // Note: we used to assert the following here. That does not hold because, during
 165       // marking/updating-refs, a Java thread can beat us with an arraycopy that first
 166       // copies the array, which potentially contains from-space refs, and only afterwards
 167       // updates all from-space refs to to-space refs. This leaves a short window where
 168       // the new array elements can be from-space.
 169       // assert(CompressedOops::is_null(result) ||
 170       //        oopDesc::equals_raw(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
 171       //       "expect not forwarded");
 172       return NULL;
 173     }
 174   } else {
 175     shenandoah_assert_not_forwarded(p, heap_oop);
 176     return heap_oop;
 177   }
 178 }
 179 
 180 inline bool ShenandoahHeap::cancelled_gc() const {
 181   return _cancelled_gc.get() == CANCELLED;
 182 }
 183 
 184 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
 185   if (! (sts_active && ShenandoahSuspendibleWorkers)) {
 186     return cancelled_gc();
 187   }
 188 
 189   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
 190   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
 191     if (SuspendibleThreadSet::should_yield()) {
 192       SuspendibleThreadSet::yield();

New version:

  99 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 100   T o = RawAccess<>::oop_load(p);
 101   if (!CompressedOops::is_null(o)) {
 102     oop obj = CompressedOops::decode_not_null(o);
 103     return maybe_update_with_forwarded_not_null(p, obj);
 104   } else {
 105     return NULL;
 106   }
 107 }
 108 
 109 template <class T>
 110 inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
 111   T o = RawAccess<>::oop_load(p);
 112   if (!CompressedOops::is_null(o)) {
 113     oop heap_oop = CompressedOops::decode_not_null(o);
 114     if (in_collection_set(heap_oop)) {
 115       oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 116       if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
 117         forwarded_oop = evacuate_object(heap_oop, Thread::current());
 118       }
 119       oop prev = cas_oop(forwarded_oop, p, heap_oop);
 120       if (oopDesc::equals_raw(prev, heap_oop)) {
 121         return forwarded_oop;
 122       } else {
 123         return NULL;
 124       }
 125     }
 126     return heap_oop;
 127   } else {
 128     return NULL;
 129   }
 130 }
 131 
 132 inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
 133   return (oop) Atomic::cmpxchg(n, addr, c);
 134 }
 135 
 136 inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
 137   narrowOop cmp = CompressedOops::encode(c);
 138   narrowOop val = CompressedOops::encode(n);
 139   return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
 140 }
 141 
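Both cas_oop overloads return the failure witness, i.e. the value actually found at addr: on success it equals c, on failure it is the contending write; the narrowOop overload encodes both operands and decodes the witness. For context, here is a minimal standalone sketch of the same witness-returning CAS shape, using std::atomic rather than HotSpot's Atomic; the names narrow_ref and cas_witness are illustrative only, not HotSpot code:

    #include <atomic>
    #include <cstdint>

    typedef uint32_t narrow_ref;  // hypothetical stand-in for a compressed reference

    // Like Atomic::cmpxchg, returns the value observed at addr: 'expected' on
    // success, the contending write on failure.
    static narrow_ref cas_witness(std::atomic<narrow_ref>* addr,
                                  narrow_ref expected, narrow_ref desired) {
      // compare_exchange_strong overwrites 'expected' with the observed value on
      // failure and leaves it untouched on success, so 'expected' is the witness.
      addr->compare_exchange_strong(expected, desired);
      return expected;
    }
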
 142 template <class T>
 143 inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
 144   shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 145   shenandoah_assert_correct(p, heap_oop);
 146 
 147   if (in_collection_set(heap_oop)) {
 148     oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 149     if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
 150       // E.g. during evacuation.
 151       return forwarded_oop;
 152     }
 153 
 154     shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 155     shenandoah_assert_not_forwarded(p, forwarded_oop);
 156     shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());
 157 
 158     // If this fails, another thread wrote to p before us: the write will be logged in
 159     // the SATB queue, and the reference will be updated later.
 160     oop witness = cas_oop(forwarded_oop, p, heap_oop);
 161 
 162     if (!oopDesc::equals_raw(witness, heap_oop)) {
 163       // CAS failed; someone beat us to it. Normally, we would return the failure witness,
 164       // because that would be the proper write of a to-space object, enforced by strong
 165       // barriers. However, there is a corner case with arraycopy: a Java thread can beat
 166       // us with an arraycopy that first copies the array, which potentially contains
 167       // from-space refs, and only afterwards updates all from-space refs to to-space refs.
 168       // This leaves a short window where the new array elements can be from-space.
 169       // In that case, we can simply resolve the witness again. As we resolve, we need to
 170       // consider that the contended write might have been NULL.
 171       oop result = ShenandoahBarrierSet::resolve_forwarded(witness);
 172       shenandoah_assert_not_forwarded_except(p, result, (result == NULL));
 173       shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc());
 174       return result;
 175     } else {
 176       // Success! We have updated the reference with a known to-space copy. We have already asserted it is sane.
 177       return forwarded_oop;
 178     }
 179   } else {
 180     shenandoah_assert_not_forwarded(p, heap_oop);
 181     return heap_oop;
 182   }
 183 }
 184 
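The failure path above boils down to: try to install the to-space reference, and if the CAS loses, resolve whatever won, because the winner may itself still be a from-space value published in the arraycopy window, or NULL. A minimal sketch of that shape, assuming a hypothetical obj type with a forwarding pointer and a NULL-safe resolve(); this is an illustration under those assumptions, not the HotSpot implementation:

    #include <atomic>
    #include <cstddef>

    // Minimal object with a forwarding pointer, standing in for a heap oop.
    struct obj {
      std::atomic<obj*> fwd;  // points to self when not forwarded
    };

    // Hypothetical NULL-safe resolve: follow the forwarding pointer once.
    static obj* resolve(obj* o) {
      return (o == NULL) ? NULL : o->fwd.load(std::memory_order_acquire);
    }

    // Try to update *p from 'from' to its to-space copy 'to'. Returns the value
    // the slot is known to hold afterwards (possibly NULL under contention).
    static obj* update_ref(std::atomic<obj*>* p, obj* from, obj* to) {
      obj* expected = from;
      if (p->compare_exchange_strong(expected, to)) {
        return to;  // success: the slot now holds the to-space copy
      }
      // Lost the race: 'expected' is the contending write. It may still be a
      // from-space value (arraycopy window) or NULL, so resolve it once more.
      return resolve(expected);
    }
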
 185 inline bool ShenandoahHeap::cancelled_gc() const {
 186   return _cancelled_gc.get() == CANCELLED;
 187 }
 188 
 189 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
 190   if (! (sts_active && ShenandoahSuspendibleWorkers)) {
 191     return cancelled_gc();
 192   }
 193 
 194   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
 195   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
 196     if (SuspendibleThreadSet::should_yield()) {
 197       SuspendibleThreadSet::yield();