
src/hotspot/share/gc/shared/barrierSetNMethod.cpp

Old version:

160 #if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
161   // We clear the patching epoch when disarming nmethods, so that
162   // the counter won't overflow.
163   BarrierSetAssembler::clear_patching_epoch();
164 #endif
165 }
166 
167 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
168   // Enable WXWrite: the function is called directly from nmethod_entry_barrier
169   // stub.
170   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
171 
172   address return_address = *return_address_ptr;
173   AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
174   CodeBlob* cb = CodeCache::find_blob(return_address);
175   assert(cb != nullptr, "invariant");
176 
177   nmethod* nm = cb->as_nmethod();
178   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
179 
180   // Called upon first entry after being armed
181   bool may_enter = bs_nm->nmethod_entry_barrier(nm);
182   assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
183 
184   // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
185   // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
186   // code, where the existence of new instructions is communicated via data (the guard value).
187   // This cross modify fence is only needed when the nmethod entry barrier modifies the
188   // instructions. Not all platforms currently do that, so if this check becomes expensive,
189   // it can be made conditional on the nmethod_patching_type.
190   OrderAccess::cross_modify_fence();
191 
192   // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
193   // a very rare event.
194   if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
195     static volatile uint32_t counter = 0;
196     if (Atomic::add(&counter, 1u) % 10 == 0) {
197       may_enter = false;
198     }
199   }
200 
201   if (!may_enter) {
202     log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
203     bs_nm->deoptimize(nm, return_address_ptr);
204   }
205   return may_enter ? 0 : 1;
206 }
207 
208 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
209   assert(nm->is_osr_method(), "Should not reach here");
210   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
211   bool result = nmethod_entry_barrier(nm);
212   OrderAccess::cross_modify_fence();
213   return result;
214 }
215 
216 oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
217   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
218 }
219 
220 oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
221   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
222 }

New version:
160 #if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
161   // We clear the patching epoch when disarming nmethods, so that
162   // the counter won't overflow.
163   BarrierSetAssembler::clear_patching_epoch();
164 #endif
165 }
166 
167 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
168   // Enable WXWrite: the function is called directly from nmethod_entry_barrier
169   // stub.
170   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
171 
172   address return_address = *return_address_ptr;
173   AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
174   CodeBlob* cb = CodeCache::find_blob(return_address);
175   assert(cb != nullptr, "invariant");
176 
177   nmethod* nm = cb->as_nmethod();
178   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
179 
180   log_trace(nmethod, barrier)("Running nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
181   // Called upon first entry after being armed
182   bool may_enter = bs_nm->nmethod_entry_barrier(nm);
183   assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
184 
185   // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
186   // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
187   // code, where the existence of new instructions is communicated via data (the guard value).
188   // This cross modify fence is only needed when the nmethod entry barrier modifies the
189   // instructions. Not all platforms currently do that, so if this check becomes expensive,
190   // it can be made conditional on the nmethod_patching_type.
191   OrderAccess::cross_modify_fence();
192 
193   // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
194   // a very rare event.
195   if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
196     static volatile uint32_t counter = 0;
197     if (Atomic::add(&counter, 1u) % 10 == 0) {
198       may_enter = false;
199     }
200   }
201 
202   if (may_enter) {
203     nm->set_used();
204   } else {
205     log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
206     bs_nm->deoptimize(nm, return_address_ptr);
207   }
208   return may_enter ? 0 : 1;
209 }
210 
211 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
212   assert(nm->is_osr_method(), "Should not reach here");
213   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
214   bool result = nmethod_entry_barrier(nm);
215   if (result) {
216     nm->set_used();
217   }
218   OrderAccess::cross_modify_fence();
219   return result;
220 }
221 
222 oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
223   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
224 }
225 
226 oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
227   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
228 }
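
The cross-modify-fence comment in the listing above describes synchronous cross-modifying code: the barrier publishes new code state through data (a guard value) rather than relying on instruction patching alone. As a rough illustration of that idea, here is a minimal, self-contained sketch in portable C++; the names (ArmedMethod, disarm, on_entry) are hypothetical stand-ins for HotSpot's nmethod guard machinery, and std::atomic orderings stand in for the real cross-modify fence.

    #include <atomic>

    // Hypothetical sketch of a data-communicated entry barrier (not HotSpot API).
    // An "armed" method checks a guard word on entry; a concurrent thread
    // disarms it by publishing a new guard value. Because the new state is
    // communicated via data, readers only need an acquire load here (plus, on
    // real hardware, a cross-modify fence before executing patched code).
    class ArmedMethod {
      std::atomic<int> _guard;

    public:
      explicit ArmedMethod(int armed_value) : _guard(armed_value) {}

      // Called by the runtime once the method's metadata has been fixed up.
      void disarm() {
        _guard.store(0, std::memory_order_release);
      }

      // Called on method entry; returns true when entry may proceed.
      bool on_entry() {
        if (_guard.load(std::memory_order_acquire) != 0) {
          // Slow path: do the barrier work, then disarm so later
          // entries take the fast path.
          disarm();
        }
        return true;
      }
    };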
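Similarly, the DeoptimizeNMethodBarriersALot block uses a lock-free counter to force the rare deoptimization path roughly once every ten calls. A standalone equivalent of that pattern with std::atomic (a sketch, not the HotSpot code):

    #include <atomic>
    #include <cstdint>

    // Racy-but-safe 1-in-N sampling: fetch_add hands every caller a unique
    // ticket, and every N-th ticket triggers the rare event. This mirrors
    // Atomic::add(&counter, 1u) % 10 == 0 above, noting that Atomic::add
    // returns the incremented value while fetch_add returns the old one.
    static bool force_rare_event() {
      static std::atomic<uint32_t> counter{0};
      return (counter.fetch_add(1, std::memory_order_relaxed) + 1) % 10 == 0;
    }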