// Note that this offset is independent of PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}
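// Worked example with hypothetical numbers (slow_path_size(nm) is not fixed
// here): under conc_instruction_and_data_patch with slow_path_size(nm) == 6,
// the barrier begins at -4 * (10 + 6) = -64 bytes relative to
// code_begin() + frame_complete_offset(), i.e. 16 four-byte AArch64
// instruction slots. When the guard is kept inline (neither C2 nor JVMCI,
// see the constructor below), the last of those slots holds the guard word.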

class NativeNMethodBarrier {
  address _instruction_address;
  int* _guard_addr;
  nmethod* _nm;

  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // The guard is the last 4-byte word of the barrier code.
    return (-entry_barrier_offset(nm)) - 4;
  }

public:
  NativeNMethodBarrier(nmethod* nm): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With C2-compiled code, the guard is out-of-line in a stub;
          // we find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }
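  // Summary of where the constructor above finds the guard word:
  //  - JVMCI: in the data section, through a section_word relocation at
  //           nmethod_entry_patch_offset()
  //  - C2:    out-of-line in a stub, through an entry_guard relocation
  //  - other: inline, in the last 4-byte word of the barrier
  //           (local_guard_offset)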

  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

// ... (elided: the rest of NativeNMethodBarrier, including set_value() and
// check_barrier(), and the beginning of the deoptimize code that follows) ...
  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}
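// Reading of the code above (an inference, not a statement from the original
// file): the barred nmethod's frame is unwound to its sender, and the saved
// pc is redirected to the handle_wrong_method stub, so the call is
// re-resolved instead of re-entering the nmethod.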

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed.
    // Disarming is performed with a release store. In the nmethod entry
    // barrier, the values are read in the opposite order, such that the load
    // of the nmethod guard acquires the patching epoch. This way, the guard
    // is guaranteed to block entry into the nmethod until the requirement
    // for further fencing by mutators has been safely published; only then
    // are they allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }
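  // A sketch of the intended acquire/release pairing (illustrative only; the
  // reader side is emitted by the barrier assembler, not written here):
  //
  //   disarm (this path):                  entry barrier (mutator):
  //     epoch++;                             g = load_acquire(&guard);
  //     release_store(&guard, disarmed);     e = load(&epoch);  // after g
  //
  // A mutator that observes the disarmed guard value is therefore also
  // guaranteed to observe the incremented patching epoch.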

  NativeNMethodBarrier barrier(nm);
  barrier.set_value(value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}
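// A minimal usage sketch (illustrative; is_armed_example is a hypothetical
// helper, not part of this file): callers compare the loaded guard against
// the disarmed value to decide whether the barrier slow path must run.
//
//   static bool is_armed_example(BarrierSetNMethod* bs_nm, nmethod* nm) {
//     return bs_nm->guard_value(nm) != bs_nm->disarmed_guard_value();
//   }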

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif
// ---------------------------------------------------------------------------
// An alternative version of the same code follows, extended to handle the
// additional entry points of methods with scalarized arguments.
// ---------------------------------------------------------------------------
// Note that this offset is independent of PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

class NativeNMethodBarrier {
  address _instruction_address;
  int* _guard_addr;
  nmethod* _nm;

public:
  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // The guard is the last 4-byte word of the barrier code.
    return (-entry_barrier_offset(nm)) - 4;
  }

  NativeNMethodBarrier(nmethod* nm, address alt_entry_instruction_address = 0): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      assert(alt_entry_instruction_address == 0, "invariant");
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = (alt_entry_instruction_address != 0) ? alt_entry_instruction_address :
          nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With C2-compiled code, the guard is out-of-line in a stub;
          // we find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }
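  // Note (inferred from set_value() below): alt_entry_instruction_address
  // lets this class describe the additional barriers emitted at the extra
  // entry points of methods with scalarized arguments; the default of 0
  // selects the barrier at the primary verified entry.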

  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

// ... (elided: the rest of NativeNMethodBarrier, including set_value(),
// verify() and check_barrier(), and the beginning of the deoptimize code
// that follows) ...
  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

static void set_value(nmethod* nm, jint val) {
  NativeNMethodBarrier cmp1 = NativeNMethodBarrier(nm);
  cmp1.set_value(val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each
    // with its own nmethod entry barrier.
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

    int barrier_offset = cmp1.instruction_address() - method_body;
    NativeNMethodBarrier cmp2 = NativeNMethodBarrier(nm, entry_point2 + barrier_offset);
    assert(cmp1.instruction_address() != cmp2.instruction_address(), "sanity");
    debug_only(cmp2.verify());
    cmp2.set_value(val);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodBarrier cmp3 = NativeNMethodBarrier(nm, nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1.instruction_address() != cmp3.instruction_address() && cmp2.instruction_address() != cmp3.instruction_address(), "sanity");
      debug_only(cmp3.verify());
      cmp3.set_value(val);
    }
  }
}
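// Illustration of the entry points touched above (hypothetical layout, not
// to scale; which entry belongs to cmp1 depends on the compiler, as encoded
// in the is_compiled_by_c1() selection above):
//
//   verified_entry_point()           -> barrier cmp1 or cmp2
//   verified_inline_entry_point()    -> the other of cmp1/cmp2, at the same
//                                       barrier_offset from its entry
//   verified_inline_ro_entry_point() -> barrier cmp3, unless it aliases one
//                                       of the entries above
//
// All guards must carry the same value; otherwise a mutator entering through
// one entry point could bypass a barrier that is still armed at another.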

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed.
    // Disarming is performed with a release store. In the nmethod entry
    // barrier, the values are read in the opposite order, such that the load
    // of the nmethod guard acquires the patching epoch. This way, the guard
    // is guaranteed to block entry into the nmethod until the requirement
    // for further fencing by mutators has been safely published; only then
    // are they allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  set_value(nm, value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif