#if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
  // We clear the patching epoch when disarming nmethods, so that
  // the counter won't overflow.
  BarrierSetAssembler::clear_patching_epoch();
#endif
}

int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  // Enable WXWrite: the function is called directly from nmethod_entry_barrier
  // stub.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));

  address return_address = *return_address_ptr;
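  // With pointer authentication (AArch64 PAC-RET), the return address read from the
  // stack carries a signature in its upper bits; strip it before looking the address
  // up in the code cache.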
  AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
  CodeBlob* cb = CodeCache::find_blob(return_address);
  assert(cb != nullptr, "invariant");

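  // The return address points back into the compiled method whose entry barrier stub
  // called us, so the blob found above is always an nmethod.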
  nmethod* nm = cb->as_nmethod();
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  log_trace(nmethod, barrier)("Running nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
  // Called upon first entry after being armed
  bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
  assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");

  if (may_enter) {
    // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
    // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
    // code, where the existence of new instructions is communicated via data (the guard value).
    // This cross modify fence is only needed when the nmethod entry barrier modifies the
    // instructions. Not all platforms currently do that, so if this check becomes expensive,
    // it can be made conditional on the nmethod_patching_type.
    OrderAccess::cross_modify_fence();

    // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
    // a very rare event.
    if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
      static volatile uint32_t counter = 0;
      if (Atomic::add(&counter, 1u) % 10 == 0) {
        may_enter = false;
      }
    }
  }

  if (may_enter) {
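    // Record that the nmethod has just been entered, so heuristics that look for
    // cold (unused) nmethods see it as recently active.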
    nm->set_used();
  } else {
    log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
    bs_nm->deoptimize(nm, return_address_ptr);
  }
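  // A zero return tells the entry barrier stub to continue into the nmethod; a non-zero
  // value tells it not to enter (the caller has already been redirected by deoptimize() above).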
  return may_enter ? 0 : 1;
}

bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
  assert(nm->is_osr_method(), "Should not reach here");
  log_trace(nmethod, barrier)("Running osr nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
  bool result = nmethod_entry_barrier(nm);
  if (result) {
    nm->set_used();
  }
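  // As in nmethod_stub_entry_barrier(), make sure any instructions patched by a
  // concurrent disarm are visible before the OSR nmethod is entered.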
  OrderAccess::cross_modify_fence();
  return result;
}

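// Load an oop from the nmethod's oop table without a keep-alive barrier. The returned
// oop must not be relied upon to keep the referenced object alive.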
oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
  return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
}

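// Load an oop from the nmethod's oop table with phantom reference strength, as used
// when the nmethod's oops may be concurrently dying during class unloading.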
oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
  return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
}

// Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
// deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
// A sticky armed bit is set and other bits are preserved. As a result, a call to
// nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() still returns
// false and nmethod_entry_barrier() is not called.
void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
  // Enter critical section. Does not block for safepoint.
  ConditionalMutexLocker ml(NMethodEntryBarrier_lock, !NMethodEntryBarrier_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);