src/hotspot/share/gc/z/zBarrier.inline.hpp

 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "runtime/atomic.hpp"

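// ZBarrierFastPath and ZBarrierSlowPath are function pointer types
// (assumed here to be declared in zBarrier.hpp): a fast path takes a
// colored address and returns whether it is already safe to use as-is,
// while a slow path takes an address and returns the corresponding
// good address.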
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  uintptr_t addr = ZOop::to_address(o);

retry:
  // Fast path
  if (fast_path(addr)) {
    return ZOop::to_oop(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Self heal, but only if the address was actually updated by the slow path,
  // which might not be the case, e.g. when marking through an already good oop.
  if (p != NULL && good_addr != addr) {
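    // Note that in this version of HotSpot, Atomic::cmpxchg() takes the
    // new value first, then the destination, then the expected value,
    // and returns the value previously found at the destination.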
    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. If this oop was updated by a
      // weak barrier the new oop might not be good, in which case we need
      // to re-apply this barrier.
      addr = prev_addr;
      goto retry;
    }
  }

  return ZOop::to_oop(good_addr);
}

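// A sketch of how barrier() is instantiated further down in this file
// (beyond this excerpt); load_barrier_on_oop_slow_path is assumed here
// to be the matching slow path declared in zBarrier.hpp:
//
//   inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
//     return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
//   }
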
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::to_oop(ZAddress::good_or_null(addr));
  }

  // Slow path
  uintptr_t good_addr = slow_path(addr);

  // Self heal unless the address returned from the slow path is null,
  // in which case resurrection was blocked and we must let the reference
  // processor clear the oop. Mutators are not allowed to clear oops in
  // these cases, since that would be similar to calling Reference.clear(),
  // which could make the reference non-discoverable or cause it to be
  // silently dropped by the reference processor.
  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier, so we always self heal with the remapped address.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. The new
      // oop is guaranteed to be weak good or null.
      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");

      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      good_addr = ZAddress::good_or_null(prev_addr);
    }
  }

  return ZOop::to_oop(good_addr);
}

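// Similarly, a sketch of a weak_barrier() instantiation found later in
// this file (beyond this excerpt); weak_load_barrier_on_oop_slow_path is
// assumed to be the matching slow path declared in zBarrier.hpp:
//
//   inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
//     return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
//   }
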
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, or under a lock,
  // which ensures we are never racing with mutators modifying roots while
  // we are healing them. It's also safe in case multiple GC threads try
  // to heal the same root if it is aligned, since they would always heal
  // the root in the same way and it does not matter in which order it
  // happens. For misaligned oops, there needs to be mutual exclusion.
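  // (A naturally aligned oop store is a single word store, which is
  // atomic on the platforms ZGC supports, so GC threads storing the
  // same good address cannot tear each other's writes.)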
  *p = ZOop::to_oop(good_addr);
}

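// Fast path predicates, intended for use as the fast_path template
// argument above. Each predicate inspects the metadata (color) bits of
// the address to decide whether the barrier can take the fast path.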
inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
  return ZAddress::is_null(addr);
}

inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}

inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}

inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
  const bool is_blocked = ZResurrection::is_blocked();

  // Reload oop after checking the resurrection blocked state. This is
  // done to prevent a race where we first load an oop, which is logically
  // null but not yet cleared, then this oop is cleared by the reference
  // processor and resurrection is unblocked. Using the stale oop would
  // resurrect an object that reference processing has already decided is
  // dead. Reloading after the check ensures that we see the cleared value
  // in that case.
  *o = *p;

  return is_blocked;
}