
src/hotspot/share/gc/shared/barrierSetNMethod.cpp


Old version:

163   }
164   BarrierSetNMethodArmClosure cl(_current_phase);
165   Threads::threads_do(&cl);
166 
167 #if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
168   // We clear the patching epoch when disarming nmethods, so that
169   // the counter won't overflow.
170   BarrierSetAssembler::clear_patching_epoch();
171 #endif
172 }
173 
174 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
175   address return_address = *return_address_ptr;
176   AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
177   CodeBlob* cb = CodeCache::find_blob(return_address);
178   assert(cb != nullptr, "invariant");
179 
180   nmethod* nm = cb->as_nmethod();
181   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
182 
183   // Called upon first entry after being armed
184   bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
185   assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
186 
187   if (may_enter) {
188     // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
189     // are made visible by using a cross modify fence. Note that this is synchronous cross modifying
190     // code, where the existence of new instructions is communicated via data (the guard value).
191     // This cross modify fence is only needed when the nmethod entry barrier modifies the
192     // instructions. Not all platforms currently do that, so if this check becomes expensive,
193     // it can be made conditional on the nmethod_patching_type.
194     OrderAccess::cross_modify_fence();
195 
196     // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
197     // a very rare event.
198     if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
199       static volatile uint32_t counter = 0;
200       if (AtomicAccess::add(&counter, 1u) % 10 == 0) {
201         may_enter = false;
202       }
203     }
204   }
205 
206   if (!may_enter) {
207     log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
208     bs_nm->deoptimize(nm, return_address_ptr);
209   }
210   return may_enter ? 0 : 1;
211 }
212 
213 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
214   assert(nm->is_osr_method(), "Should not reach here");
215   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
216   bool result = nmethod_entry_barrier(nm);
217   OrderAccess::cross_modify_fence();
218   return result;
219 }
220 
221 oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
222   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
223 }
224 
225 oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
226   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
227 }
228 
229 // Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
230 // deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
231 // A sticky armed bit is set and other bits are preserved.  As a result, a call to
232 // nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() still returns
233 // false and nmethod_entry_barrier() is not called.
234 void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
235   set_guard_value(nm, not_entrant, not_entrant);
236 }

New version:

163   }
164   BarrierSetNMethodArmClosure cl(_current_phase);
165   Threads::threads_do(&cl);
166 
167 #if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
168   // We clear the patching epoch when disarming nmethods, so that
169   // the counter won't overflow.
170   BarrierSetAssembler::clear_patching_epoch();
171 #endif
172 }
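
For context, the patching-epoch scheme referenced in the comment above can be sketched roughly as follows. This is an illustrative model only, assuming a global counter bumped on each patching event and a per-thread cached copy; the names (global_epoch, instruction_sync, disarm_all_at_safepoint) are invented and do not match HotSpot's internals, where the real logic lives in the platform BarrierSetAssembler.

#include <atomic>
#include <cstdint>
#include <vector>

struct ThreadState { uint32_t patching_epoch = 0; };

static std::atomic<uint32_t> global_epoch{0};
static std::vector<ThreadState*> all_threads;   // every mutator thread

// Each patching event publishes a new epoch.
void on_patch() {
  global_epoch.fetch_add(1, std::memory_order_release);
}

// Before executing possibly-patched code, a thread synchronizes its
// instruction stream only if its cached epoch is stale.
void before_entering_patched_code(ThreadState* self) {
  uint32_t g = global_epoch.load(std::memory_order_acquire);
  if (self->patching_epoch != g) {
    // instruction_sync();  // placeholder for ISB (AArch64) / fence.i (RISC-V)
    self->patching_epoch = g;
  }
}

// Mirrors the Threads::threads_do + clear_patching_epoch pairing above:
// once no nmethod is armed, no thread can be behind on patched code, so
// both the global counter and every thread's cached copy can be reset.
// This is what keeps the epoch counter from ever overflowing.
void disarm_all_at_safepoint() {
  for (ThreadState* t : all_threads) {
    t->patching_epoch = 0;
  }
  global_epoch.store(0, std::memory_order_release);
}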
173 
174 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
175   address return_address = *return_address_ptr;
176   AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
177   CodeBlob* cb = CodeCache::find_blob(return_address);
178   assert(cb != nullptr, "invariant");
179 
180   nmethod* nm = cb->as_nmethod();
181   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
182 
183   log_trace(nmethod, barrier)("Running nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
184   // Called upon first entry after being armed
185   bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
186   assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
187 
188   if (may_enter) {
189     // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
190     // are made visible by using a cross modify fence. Note that this is synchronous cross modifying
191     // code, where the existence of new instructions is communicated via data (the guard value).
192     // This cross modify fence is only needed when the nmethod entry barrier modifies the
193     // instructions. Not all platforms currently do that, so if this check becomes expensive,
194     // it can be made conditional on the nmethod_patching_type.
195     OrderAccess::cross_modify_fence();
196 
197     // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
198     // a very rare event.
199     if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
200       static volatile uint32_t counter = 0;
201       if (AtomicAccess::add(&counter, 1u) % 10 == 0) {
202         may_enter = false;
203       }
204     }
205   }
206 
207   if (may_enter) {
208     MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
209     nm->set_used();
210   } else {
211     log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
212     bs_nm->deoptimize(nm, return_address_ptr);
213   }
214   return may_enter ? 0 : 1;
215 }
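
The comment in the body above describes synchronous cross-modifying code: new instructions are published via a data write (the guard value), and an entering thread must execute a cross-modify fence before running them. Below is a minimal sketch of that handshake, with std::atomic_thread_fence standing in for OrderAccess::cross_modify_fence() (a serializing instruction on x86, an ISB on AArch64) and all names invented for illustration.

#include <atomic>

// One guard word per nmethod; any value other than `disarmed` means armed.
struct NMethodStub {
  std::atomic<int> guard{1};  // starts armed
};
constexpr int disarmed = 0;

// Hypothetical slow path: do the GC's per-nmethod work (possibly patching
// instructions on some platforms), then publish the disarm via data.
static bool gc_work_and_disarm(NMethodStub* nm) {
  nm->guard.store(disarmed, std::memory_order_release);
  return true;  // false would mean the caller must deoptimize
}

// Entry side: returns true if the caller may enter the nmethod.
bool entry_barrier(NMethodStub* nm) {
  if (nm->guard.load(std::memory_order_acquire) != disarmed) {
    if (!gc_work_and_disarm(nm)) {
      return false;
    }
  }
  // If another thread disarmed the nmethod concurrently, the data load
  // above is how we learned of it; before executing instructions that the
  // disarming thread may have patched, force the pipeline to refetch.
  // Portable stand-in for the real cross_modify_fence().
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return true;
}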
216 
217 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
218   assert(nm->is_osr_method(), "Should not reach here");
219   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
220   bool result = nmethod_entry_barrier(nm);
221   if (result) {
222     nm->set_used();
223   }
224   OrderAccess::cross_modify_fence();
225   return result;
226 }
227 
228 oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
229   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
230 }
231 
232 oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
233   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
234 }
235 
236 // Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
237 // deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
238 // A sticky armed bit is set and other bits are preserved.  As a result, a call to
239 // nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() still returns
240 // false and nmethod_entry_barrier() is not called.
241 void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
242   set_guard_value(nm, not_entrant, not_entrant);
243 }
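
One way a sticky not-entrant bit can share a guard word with the arm value is sketched below. The bit layout is invented purely for illustration and need not match HotSpot's actual encoding; the point is that the compiled prologue compares the whole word, so the sticky bit keeps forcing callers into nmethod_stub_entry_barrier() even though is_armed(), which considers only the arm-value bits, reports false — hence the "spurious" calls the comment above mentions.

#include <atomic>
#include <cstdint>

// Hypothetical layout: low 31 bits hold the GC arm value, the top bit is
// the sticky "not entrant" flag.
constexpr uint32_t not_entrant_bit = 1u << 31;
constexpr uint32_t arm_value_mask  = ~not_entrant_bit;

struct Guard { std::atomic<uint32_t> word{0}; };

// The compiled prologue compares the *whole* word against the current
// disarmed value, so a set sticky bit always fails the fast-path check.
bool prologue_check_fails(const Guard& g, uint32_t disarmed_value) {
  return g.word.load(std::memory_order_acquire) != disarmed_value;
}

// The runtime query masks the sticky bit out, so it can report "not armed"
// even while the prologue keeps trapping into the stub barrier.
bool is_armed(const Guard& g, uint32_t disarmed_value) {
  return (g.word.load(std::memory_order_acquire) & arm_value_mask) != disarmed_value;
}

bool is_not_entrant(const Guard& g) {
  return (g.word.load(std::memory_order_acquire) & not_entrant_bit) != 0;
}

// Set the sticky bit and preserve every other bit (cf. set_guard_value(nm,
// not_entrant, not_entrant) above).
void make_not_entrant(Guard& g) {
  g.word.fetch_or(not_entrant_bit, std::memory_order_release);
}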