--- old/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp

    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
  NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entries to the nmethod, until it has safely published the requirement for
    // further fencing by mutators, before they are allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
  barrier->set_value(nm, value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->get_value(nm);
}
+++ new/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
  NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}

static void set_value(nmethod* nm, jint val) {
  NativeNMethodBarrier* cmp1 = native_nmethod_barrier(nm);
  cmp1->set_value(nm, val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier.
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

    int barrier_offset = reinterpret_cast<address>(cmp1) - method_body;
    NativeNMethodBarrier* cmp2 = reinterpret_cast<NativeNMethodBarrier*>(entry_point2 + barrier_offset);
    assert(cmp1 != cmp2, "sanity");
    debug_only(cmp2->verify());
    cmp2->set_value(nm, val);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodBarrier* cmp3 = reinterpret_cast<NativeNMethodBarrier*>(nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1 != cmp3 && cmp2 != cmp3, "sanity");
      debug_only(cmp3->verify());
      cmp3->set_value(nm, val);
    }
  }
}
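The additional barriers above are located by reusing one offset: the barrier found via native_nmethod_barrier() sits at a fixed distance from the primary entry point, and the same distance is reapplied at each further entry point. The following is a minimal standalone sketch of that arithmetic only, using plain byte pointers and made-up offsets in place of the real entry points; it is an illustration, not HotSpot code.

#include <cstddef>
#include <cstdio>

int main() {
  unsigned char code[256] = {};              // stand-in for an nmethod body
  unsigned char* method_body  = code;        // primary entry point (hypothetical)
  unsigned char* entry_point2 = code + 64;   // second entry point (hypothetical)
  unsigned char* barrier1     = code + 16;   // barrier at the primary entry (hypothetical)

  // The barrier sits at the same offset from each entry point, so the
  // second barrier is recovered by reapplying the first barrier's offset.
  std::ptrdiff_t barrier_offset = barrier1 - method_body;
  unsigned char* barrier2 = entry_point2 + barrier_offset;

  std::printf("barrier_offset=%td, barrier2 at code+%td\n", barrier_offset, barrier2 - code);
  return 0;
}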
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entries to the nmethod, until it has safely published the requirement for
    // further fencing by mutators, before they are allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  set_value(nm, value);
}
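The ordering contract described in the comment above pairs a release store (the disarm) with an acquire load (the guard check in the entry barrier). As a minimal sketch of that pairing, assuming nothing about HotSpot internals, the same shape can be written with std::atomic; epoch and guard below are illustrative stand-ins for the patching epoch and the nmethod guard, not the real fields.

#include <atomic>

std::atomic<int> epoch{0};   // stand-in for the patching epoch
std::atomic<int> guard{1};   // stand-in for the nmethod guard; nonzero means armed

void disarm() {                                    // patching thread
  epoch.fetch_add(1, std::memory_order_relaxed);   // 1. bump the epoch first
  guard.store(0, std::memory_order_release);       // 2. disarm with a release store
}

bool try_enter(int& observed_epoch) {              // mutator entering the nmethod
  if (guard.load(std::memory_order_acquire) != 0) {
    return false;                                  // still armed: take the slow path
  }
  // The acquire load of 'guard' synchronizes with the release store in
  // disarm(), so a mutator that sees the disarmed guard also sees the
  // incremented epoch: the requirement for further fencing is published
  // before entry is allowed.
  observed_epoch = epoch.load(std::memory_order_relaxed);
  return true;
}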
int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->get_value(nm);
}