
src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp


 51 // NativeNMethodCmpBarrier::verify() will immediately complain when it does
 52 // not find the expected native instruction at this offset, in which case the
 53 // offset below needs updating. Note that the offset does not depend on PreserveFramePointer.
 54 static int entry_barrier_offset(nmethod* nm) {
 55   BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
 56   switch (bs_asm->nmethod_patching_type()) {
 57   case NMethodPatchingType::stw_instruction_and_data_patch:
 58     return -4 * (4 + slow_path_size(nm));
 59   case NMethodPatchingType::conc_instruction_and_data_patch:
 60     return -4 * (10 + slow_path_size(nm));
 61   }
 62   ShouldNotReachHere();
 63   return 0;
 64 }
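
For illustration, the offset arithmetic above worked through with a hypothetical slow_path_size(nm) of 6 instructions (the real value depends on the nmethod and the barrier set); each AArch64 instruction is 4 bytes wide, hence the factor of 4:

    // Worked example, assuming slow_path_size(nm) == 6 (hypothetical):
    //   stw_instruction_and_data_patch:  -4 * (4 + 6)  = -40 bytes  (10 instructions
    //                                    before the frame-complete point)
    //   conc_instruction_and_data_patch: -4 * (10 + 6) = -64 bytes  (16 instructions back)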
 65 
 66 class NativeNMethodBarrier {
 67   address  _instruction_address;
 68   int*     _guard_addr;
 69   nmethod* _nm;
 70 
 71   address instruction_address() const { return _instruction_address; }
 72 
 73   int *guard_addr() {
 74     return _guard_addr;
 75   }
 76 
 77   int local_guard_offset(nmethod* nm) {
 78     // It's the last instruction
 79     return (-entry_barrier_offset(nm)) - 4;
 80   }
 81 
 82 public:
 83   NativeNMethodBarrier(nmethod* nm): _nm(nm) {
 84 #if INCLUDE_JVMCI
 85     if (nm->is_compiled_by_jvmci()) {
 86       address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
 87       RelocIterator iter(nm, pc, pc + 4);
 88       guarantee(iter.next(), "missing relocs");
 89       guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");
 90 
 91       _guard_addr = (int*) iter.section_word_reloc()->target();
 92       _instruction_address = pc;
 93     } else
 94 #endif
 95       {
 96         _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
 97         if (nm->is_compiled_by_c2()) {
 98           // With c2-compiled code, the guard is out-of-line in a stub;
 99           // we find it using the RelocIterator.
100           RelocIterator iter(nm);
101           while (iter.next()) {
102             if (iter.type() == relocInfo::entry_guard_type) {
103               entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
104               _guard_addr = reinterpret_cast<int*>(reloc->addr());
105               return;
106             }
107           }
108           ShouldNotReachHere();
109         }
110         _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
111       }
112   }
113 
114   int get_value() {
115     return AtomicAccess::load_acquire(guard_addr());
116   }

176 
177   assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
178   assert(frame.cb() == nm, "must be");
179   frame = frame.sender(&reg_map);
180 
181   LogTarget(Trace, nmethod, barrier) out;
182   if (out.is_enabled()) {
183     ResourceMark mark;
184     log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
185                                 nm->method()->name_and_sig_as_C_string(),
186                                 nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
187                                 thread->name(), frame.sp(), nm->verified_entry_point());
188   }
189 
190   new_frame->sp = frame.sp();
191   new_frame->fp = frame.fp();
192   new_frame->lr = frame.pc();
193   new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
194 }
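
The four stores above populate a small record describing the caller frame that execution continues in once the barrier's slow path redirects to the handle-wrong-method stub. A minimal sketch of what such a record could look like; the struct name and its placement relative to return_address_ptr are assumptions, since the defining code lies outside this hunk:

    #include <cstdint>
    typedef unsigned char* address;   // HotSpot-style byte pointer, re-declared for self-containment

    // Hypothetical shape of the record filled in by the code above; the field names
    // mirror the assignments (sp/fp/lr/pc) but are not taken from this hunk.
    struct frame_pointers_t {
      intptr_t* sp;   // caller stack pointer to resume with
      intptr_t* fp;   // caller frame pointer
      address   lr;   // caller return address, restored into the link register
      address   pc;   // continuation address: the handle_wrong_method stub
    };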
195 
196 void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
197   if (!supports_entry_barrier(nm)) {
198     return;
199   }
200 
201   if (value == disarmed_guard_value()) {
202     // The patching epoch is incremented before the nmethod is disarmed. Disarming
203     // is performed with a release store. In the nmethod entry barrier, the values
204     // are read in the opposite order, such that the load of the nmethod guard
205     // acquires the patching epoch. This way, the guard is guaranteed to block
206     // entries to the nmethod, until it has safely published the requirement for
207     // further fencing by mutators, before they are allowed to enter.
208     BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
209     bs_asm->increment_patching_epoch();
210   }
211 
212   NativeNMethodBarrier barrier(nm);
213   barrier.set_value(value, bit_mask);
214 }
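
The ordering described in the comment inside set_guard_value() is a release/acquire publication pattern: the disarming thread bumps the patching epoch and then disarms the guard with a release store, while an entering mutator acquire-loads the guard and only afterwards consults the epoch. A minimal standalone sketch of that pairing, using illustrative names (disarm_nmethod/try_enter are not HotSpot APIs):

    // Illustrative only: shows the store/load ordering, not the real HotSpot code.
    #include <atomic>

    std::atomic<int> patching_epoch{0};
    std::atomic<int> nmethod_guard{1};       // non-zero == armed

    void disarm_nmethod(int disarmed_value) {
      patching_epoch.fetch_add(1, std::memory_order_relaxed);          // 1) bump epoch first
      nmethod_guard.store(disarmed_value, std::memory_order_release);  // 2) then disarm
    }

    bool try_enter(int disarmed_value) {
      // Mutators read in the opposite order: the acquire load of the guard
      // makes the earlier epoch increment visible before the nmethod is entered.
      int guard = nmethod_guard.load(std::memory_order_acquire);
      if (guard != disarmed_value) {
        return false;                        // still armed: take the slow path
      }
      int epoch = patching_epoch.load(std::memory_order_relaxed);
      (void)epoch;                           // decides whether extra instruction-stream fencing is needed
      return true;
    }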
215 
216 int BarrierSetNMethod::guard_value(nmethod* nm) {
217   if (!supports_entry_barrier(nm)) {
218     return disarmed_guard_value();
219   }
220 
221   NativeNMethodBarrier barrier(nm);
222   return barrier.get_value();
223 }
224 
225 #if INCLUDE_JVMCI
226 bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
227   NativeNMethodBarrier barrier(nm);
228   return barrier.check_barrier(msg);
229 }
230 #endif

 51 // NativeNMethodCmpBarrier::verify() will immediately complain when it does
 52 // not find the expected native instruction at this offset, in which case the
 53 // offset below needs updating. Note that the offset does not depend on PreserveFramePointer.
 54 static int entry_barrier_offset(nmethod* nm) {
 55   BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
 56   switch (bs_asm->nmethod_patching_type()) {
 57   case NMethodPatchingType::stw_instruction_and_data_patch:
 58     return -4 * (4 + slow_path_size(nm));
 59   case NMethodPatchingType::conc_instruction_and_data_patch:
 60     return -4 * (10 + slow_path_size(nm));
 61   }
 62   ShouldNotReachHere();
 63   return 0;
 64 }
 65 
 66 class NativeNMethodBarrier {
 67   address  _instruction_address;
 68   int*     _guard_addr;
 69   nmethod* _nm;
 70 
 71 public:
 72   address instruction_address() const { return _instruction_address; }
 73 
 74   int *guard_addr() {
 75     return _guard_addr;
 76   }
 77 
 78   int local_guard_offset(nmethod* nm) {
 79     // It's the last instruction
 80     return (-entry_barrier_offset(nm)) - 4;
 81   }
 82 
 83   NativeNMethodBarrier(nmethod* nm, address alt_entry_instruction_address = 0): _nm(nm) {
 84 #if INCLUDE_JVMCI
 85     if (nm->is_compiled_by_jvmci()) {
 86       assert(alt_entry_instruction_address == 0, "invariant");
 87       address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
 88       RelocIterator iter(nm, pc, pc + 4);
 89       guarantee(iter.next(), "missing relocs");
 90       guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");
 91 
 92       _guard_addr = (int*) iter.section_word_reloc()->target();
 93       _instruction_address = pc;
 94     } else
 95 #endif
 96       {
 97         _instruction_address = (alt_entry_instruction_address != 0) ? alt_entry_instruction_address :
 98           nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
 99         if (nm->is_compiled_by_c2()) {
100           // With c2-compiled code, the guard is out-of-line in a stub;
101           // we find it using the RelocIterator.
102           RelocIterator iter(nm);
103           while (iter.next()) {
104             if (iter.type() == relocInfo::entry_guard_type) {
105               entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
106               _guard_addr = reinterpret_cast<int*>(reloc->addr());
107               return;
108             }
109           }
110           ShouldNotReachHere();
111         }
112         _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
113       }
114   }
115 
116   int get_value() {
117     return AtomicAccess::load_acquire(guard_addr());
118   }

178 
179   assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
180   assert(frame.cb() == nm, "must be");
181   frame = frame.sender(&reg_map);
182 
183   LogTarget(Trace, nmethod, barrier) out;
184   if (out.is_enabled()) {
185     ResourceMark mark;
186     log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
187                                 nm->method()->name_and_sig_as_C_string(),
188                                 nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
189                                 thread->name(), frame.sp(), nm->verified_entry_point());
190   }
191 
192   new_frame->sp = frame.sp();
193   new_frame->fp = frame.fp();
194   new_frame->lr = frame.pc();
195   new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
196 }
197 
198 static void set_value(nmethod* nm, jint val, int bit_mask) {
199   NativeNMethodBarrier cmp1 = NativeNMethodBarrier(nm);
200   cmp1.set_value(val, bit_mask);
201 
202   if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
203     // nmethods with scalarized arguments have multiple entry points that each have their own nmethod entry barrier
204     assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
205     address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
206     address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();
207 
208     int barrier_offset = cmp1.instruction_address() - method_body;
209     NativeNMethodBarrier cmp2 = NativeNMethodBarrier(nm, entry_point2 + barrier_offset);
210     assert(cmp1.instruction_address() != cmp2.instruction_address(), "sanity");
211     DEBUG_ONLY(cmp2.verify());
212     cmp2.set_value(val, bit_mask);
213 
214     if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
215       NativeNMethodBarrier cmp3 = NativeNMethodBarrier(nm, nm->verified_inline_ro_entry_point() + barrier_offset);
216       assert(cmp1.instruction_address() != cmp3.instruction_address() && cmp2.instruction_address() != cmp3.instruction_address(), "sanity");
217       DEBUG_ONLY(cmp3.verify());
218       cmp3.set_value(val, bit_mask);
219     }
220   }
221 }
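
The scalarized-arguments path above reuses a single byte displacement, barrier_offset, for every verified entry point; this works because each entry point is followed by an identically shaped entry barrier. A worked example with purely hypothetical addresses:

    // Hypothetical addresses, for illustration only:
    //   method_body (primary verified entry)   = 0x7f0000001000
    //   cmp1.instruction_address()             = 0x7f0000001040   -> barrier_offset = 0x40
    //   entry_point2 (second verified entry)   = 0x7f0000001100
    //   cmp2 patches the barrier at            = 0x7f0000001140   (entry_point2 + 0x40)
    //   the inline_ro entry point, when distinct, gets the same 0x40 displacement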
222 
223 void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
224   if (!supports_entry_barrier(nm)) {
225     return;
226   }
227 
228   if (value == disarmed_guard_value()) {
229     // The patching epoch is incremented before the nmethod is disarmed. Disarming
230     // is performed with a release store. In the nmethod entry barrier, the values
231     // are read in the opposite order, such that the load of the nmethod guard
232     // acquires the patching epoch. This way, the guard is guaranteed to block
233     // entries to the nmethod, until it has safely published the requirement for
234     // further fencing by mutators, before they are allowed to enter.
235     BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
236     bs_asm->increment_patching_epoch();
237   }
238 
239   set_value(nm, value, bit_mask);
240 }
241 
242 int BarrierSetNMethod::guard_value(nmethod* nm) {
243   if (!supports_entry_barrier(nm)) {
244     return disarmed_guard_value();
245   }
246 
247   NativeNMethodBarrier barrier(nm);
248   return barrier.get_value();
249 }
250 
251 #if INCLUDE_JVMCI
252 bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
253   NativeNMethodBarrier barrier(nm);
254   return barrier.check_barrier(msg);
255 }
256 #endif