src/hotspot/share/gc/shared/barrierSetNMethod.cpp

164   }
165   BarrierSetNMethodArmClosure cl(_current_phase);
166   Threads::threads_do(&cl);
167 
168 #if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
169   // We clear the patching epoch when disarming nmethods, so that
170   // the counter won't overflow.
171   BarrierSetAssembler::clear_patching_epoch();
172 #endif
173 }
174 
175 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
176   address return_address = *return_address_ptr;
177   AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
178   CodeBlob* cb = CodeCache::find_blob(return_address);
179   assert(cb != nullptr, "invariant");
180 
181   nmethod* nm = cb->as_nmethod();
182   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
183 
184   // Called upon first entry after being armed
185   bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
186   assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
187 
188   if (may_enter) {
189     // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
190     // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
191     // code, where the existence of new instructions is communicated via data (the guard value).
192     // This cross modify fence is only needed when the nmethod entry barrier modifies the
193     // instructions. Not all platforms currently do that, so if this check becomes expensive,
194     // it can be made conditional on the nmethod_patching_type.
195     OrderAccess::cross_modify_fence();
196 
197     // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
198     // a very rare event.
199     if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
200       static Atomic<uint32_t> counter{0};
201       if (counter.add_then_fetch(1u) % 10 == 0) {
202         may_enter = false;
203       }
204     }
205   }
206 
207   if (!may_enter) {
208     log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
209     bs_nm->deoptimize(nm, return_address_ptr);
210   }
211   return may_enter ? 0 : 1;
212 }
213 
214 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
215   assert(nm->is_osr_method(), "Should not reach here");
216   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
217   bool result = nmethod_entry_barrier(nm);
218   OrderAccess::cross_modify_fence();
219   return result;
220 }
221 
222 oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
223   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
224 }
225 
226 oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
227   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
228 }
229 
230 // Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
231 // deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
232 // A sticky armed bit is set and other bits are preserved.  As a result, a call to
233 // nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() still returns
234 // false and nmethod_entry_barrier() is not called.
235 void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
236   set_guard_value(nm, not_entrant, not_entrant);
237 }

164   }
165   BarrierSetNMethodArmClosure cl(_current_phase);
166   Threads::threads_do(&cl);
167 
168 #if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
169   // We clear the patching epoch when disarming nmethods, so that
170   // the counter won't overflow.
171   BarrierSetAssembler::clear_patching_epoch();
172 #endif
173 }
174 
175 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
176   address return_address = *return_address_ptr;
177   AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
178   CodeBlob* cb = CodeCache::find_blob(return_address);
179   assert(cb != nullptr, "invariant");
180 
181   nmethod* nm = cb->as_nmethod();
182   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
183 
184   log_trace(nmethod, barrier)("Running nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
185   // Called upon first entry after being armed
186   bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
187   assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
188 
189   if (may_enter) {
190     // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
191     // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
192     // code, where the existence of new instructions is communicated via data (the guard value).
193     // This cross modify fence is only needed when the nmethod entry barrier modifies the
194     // instructions. Not all platforms currently do that, so if this check becomes expensive,
195     // it can be made conditional on the nmethod_patching_type.
196     OrderAccess::cross_modify_fence();
197 
198     // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
199     // a very rare event.
200     if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
201       static Atomic<uint32_t> counter{0};
202       if (counter.add_then_fetch(1u) % 10 == 0) {
203         may_enter = false;
204       }
205     }
206   }
207 
208   if (may_enter) {
209     MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
210     nm->set_used();
211   } else {
212     log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
213     bs_nm->deoptimize(nm, return_address_ptr);
214   }
215   return may_enter ? 0 : 1;
216 }
217 
218 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
219   assert(nm->is_osr_method(), "Should not reach here");
220   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
221   bool result = nmethod_entry_barrier(nm);
222   if (result) {
223     nm->set_used();
224   }
225   OrderAccess::cross_modify_fence();
226   return result;
227 }
228 
229 oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
230   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
231 }
232 
233 oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
234   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
235 }
236 
237 // Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
238 // deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
239 // A sticky armed bit is set and other bits are preserved.  As a result, a call to
240 // nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() still returns
241 // false and nmethod_entry_barrier() is not called.
242 void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
243   set_guard_value(nm, not_entrant, not_entrant);
244 }
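
The comments in the code above describe a small protocol worth spelling out: the entry barrier communicates through data (the per-nmethod guard value), a cross modify fence is needed before executing possibly patched instructions, and make_not_entrant() sets a sticky bit that later disarms preserve. Below is a minimal standalone sketch of that protocol under illustrative assumptions; Guard, kStickyNotEntrant and enter() are made-up names, the bit layout is invented for the example, and std::atomic_thread_fence merely stands in for OrderAccess::cross_modify_fence(). It is not the BarrierSetNMethod implementation.

    // Standalone sketch only -- not HotSpot code. It models the guard-word protocol the
    // comments above describe: the guard is plain data, "disarming" means storing the
    // current disarmed value, a fence is issued before running possibly patched code,
    // and the not-entrant bit is sticky across arm/disarm.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct Guard {
      static constexpr uint32_t kStickyNotEntrant = 1u << 31;  // never cleared once set
      std::atomic<uint32_t> value{0};

      // Arm (or disarm) for a phase while preserving the sticky bit, mirroring the
      // comment that make_not_entrant() sets a sticky bit and other bits are preserved.
      void set_phase(uint32_t phase) {
        uint32_t old = value.load(std::memory_order_relaxed);
        while (!value.compare_exchange_weak(old, (old & kStickyNotEntrant) | phase,
                                            std::memory_order_release)) {
          // old is reloaded by compare_exchange_weak on failure; just retry.
        }
      }
      void make_not_entrant() { value.fetch_or(kStickyNotEntrant, std::memory_order_release); }

      bool not_entrant() const {
        return (value.load(std::memory_order_acquire) & kStickyNotEntrant) != 0;
      }
      bool armed(uint32_t disarmed_phase) const {
        return (value.load(std::memory_order_acquire) & ~kStickyNotEntrant) != disarmed_phase;
      }
    };

    // Shaped like nmethod_stub_entry_barrier(): refuse entry for not-entrant methods,
    // otherwise run the (GC-specific) barrier work and disarm, then fence before
    // executing code whose instructions may have been patched concurrently -- the
    // existence of new instructions is announced only via the guard data.
    bool enter(Guard& g, uint32_t disarmed_phase) {
      if (g.not_entrant()) {
        return false;  // the real code would deoptimize the caller here
      }
      if (g.armed(disarmed_phase)) {
        // ... GC-specific processing of the nmethod's oops would go here ...
        g.set_phase(disarmed_phase);  // publish "disarmed" through data
      }
      std::atomic_thread_fence(std::memory_order_seq_cst);  // stand-in for cross_modify_fence()
      return true;
    }

    int main() {
      Guard g;
      g.set_phase(1);  // armed: guard differs from the disarmed value
      std::printf("first entry : %s\n", enter(g, 0) ? "enter" : "deopt");
      g.make_not_entrant();
      std::printf("after sticky: %s\n", enter(g, 0) ? "enter" : "deopt");
      return 0;
    }

As the source notes, deoptimization through this path is rare in normal runs; to exercise it deliberately, the diagnostic flag checked above can be enabled, e.g. java -XX:+UnlockDiagnosticVMOptions -XX:+DeoptimizeNMethodBarriersALot (diagnostic flags must be unlocked first).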