160 }
161
162 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
163 // Enable WXWrite: the function is called directly from nmethod_entry_barrier
164 // stub.
165 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
166
167 address return_address = *return_address_ptr;
168 AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
169 CodeBlob* cb = CodeCache::find_blob(return_address);
170 assert(cb != nullptr, "invariant");
171
172 nmethod* nm = cb->as_nmethod();
173 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
174
175 if (!bs_nm->is_armed(nm)) {
176 return 0;
177 }
178
179 assert(!nm->is_osr_method(), "Should not reach here");
180 // Called upon first entry after being armed
181 bool may_enter = bs_nm->nmethod_entry_barrier(nm);
182
183 // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
184 // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
185 // code, where the existence of new instructions is communicated via data (the guard value).
186 // This cross modify fence is only needed when the nmethod entry barrier modifies the
187 // instructions. Not all platforms currently do that, so if this check becomes expensive,
188 // it can be made conditional on the nmethod_patching_type.
189 OrderAccess::cross_modify_fence();
190
191 // Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
192 // a very rare event.
193 if (DeoptimizeNMethodBarriersALot) {
194 static volatile uint32_t counter=0;
195 if (Atomic::add(&counter, 1u) % 3 == 0) {
196 may_enter = false;
197 }
198 }
199
200 if (!may_enter) {
201 log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
202 bs_nm->deoptimize(nm, return_address_ptr);
203 }
204 return may_enter ? 0 : 1;
205 }
206
207 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
208 // This check depends on the invariant that all nmethods that are deoptimized / made not entrant
209 // are NOT disarmed.
210 // This invariant is important because a method can be deoptimized after the method have been
211 // resolved / looked up by OSR by another thread. By not deoptimizing them we guarantee that
212 // a deoptimized method will always hit the barrier and come to the same conclusion - deoptimize
213 if (!is_armed(nm)) {
214 return true;
215 }
216
217 assert(nm->is_osr_method(), "Should not reach here");
218 log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
219 bool result = nmethod_entry_barrier(nm);
220 OrderAccess::cross_modify_fence();
221 return result;
222 }
|
160 }
161
// Runtime upcall from the nmethod entry barrier stub. Returns 0 when the
// caller may continue into the nmethod, 1 when the nmethod was deoptimized.
162 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
163 // Enable WXWrite: the function is called directly from nmethod_entry_barrier
164 // stub.
165 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
166
167 address return_address = *return_address_ptr;
// On AArch64 ports the return address may carry a pointer-authentication
// signature; strip it before the code cache lookup.
168 AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
169 CodeBlob* cb = CodeCache::find_blob(return_address);
170 assert(cb != nullptr, "invariant");
171
172 nmethod* nm = cb->as_nmethod();
173 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
174
// A concurrent thread may already have disarmed the nmethod; entry is then
// allowed with no further work.
175 if (!bs_nm->is_armed(nm)) {
176 return 0;
177 }
178
179 assert(!nm->is_osr_method(), "Should not reach here");
180 log_trace(nmethod, barrier)("Running nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
181 // Called upon first entry after being armed
182 bool may_enter = bs_nm->nmethod_entry_barrier(nm);
183
184 // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
185 // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
186 // code, where the existence of new instructions is communicated via data (the guard value).
187 // This cross modify fence is only needed when the nmethod entry barrier modifies the
188 // instructions. Not all platforms currently do that, so if this check becomes expensive,
189 // it can be made conditional on the nmethod_patching_type.
190 OrderAccess::cross_modify_fence();
191
192 // Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
193 // a very rare event.
194 if (DeoptimizeNMethodBarriersALot) {
195 static volatile uint32_t counter=0;
196 if (Atomic::add(&counter, 1u) % 3 == 0) {
197 may_enter = false;
198 }
199 }
200
201 if (may_enter) {
// Mark the nmethod as used on successful entry — mirrors the OSR entry
// barrier below. NOTE(review): set_used() semantics not visible in this
// chunk; presumably feeds code-cache "cold nmethod" heuristics — confirm.
202 nm->set_used();
203 } else {
204 log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
205 bs_nm->deoptimize(nm, return_address_ptr);
206 }
207 return may_enter ? 0 : 1;
208 }
209
// Entry barrier for on-stack-replacement nmethods. Returns true when the
// caller may enter the OSR nmethod, false when it must bail out.
210 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
211 // This check depends on the invariant that all nmethods that are deoptimized / made not entrant
212 // are NOT disarmed.
213 // This invariant is important because a method can be deoptimized after the method have been
214 // resolved / looked up by OSR by another thread. By not deoptimizing them we guarantee that
215 // a deoptimized method will always hit the barrier and come to the same conclusion - deoptimize
216 if (!is_armed(nm)) {
217 return true;
218 }
219
220 assert(nm->is_osr_method(), "Should not reach here");
221 log_trace(nmethod, barrier)("Running osr nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
222 bool result = nmethod_entry_barrier(nm);
223 if (result) {
// Mark the nmethod as used on successful entry — mirrors the stub entry
// barrier above.
224 nm->set_used();
225 }
// Make any instructions patched by the barrier visible to this thread before
// executing the nmethod (synchronous cross-modifying code; see the comment
// in nmethod_stub_entry_barrier).
226 OrderAccess::cross_modify_fence();
227 return result;
228 }
|