/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

static int slow_path_size(nmethod* nm) {
  // The slow path code is out of line with C2
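  // For other compilers the slow path is emitted inline and adds 6 instructions
  // to the barrier.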
  return nm->is_compiled_by_c2() ? 0 : 6;
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does
// not find the expected native instruction at this offset, in which case the
// offset needs updating.
// Note that this offset is invariant of PreserveFramePointer.
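// Each instruction in the barrier sequence is 4 bytes wide, hence the -4
// multiplier in the offsets below.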
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

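// Models an nmethod entry barrier: the address of the barrier's first
// instruction (an ldr (literal)) and the address of the guard word the barrier
// compares against. For C2- and JVMCI-compiled code the guard is located via
// relocations; otherwise it occupies the last word of the inline barrier
// sequence.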
class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

public:
  address instruction_address() const { return _instruction_address; }

  int *guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // It's the last instruction
    return (-entry_barrier_offset(nm)) - 4;
  }

  NativeNMethodBarrier(nmethod* nm, address alt_entry_instruction_address = 0): _nm(nm) {
#if INCLUDE_JVMCI
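    // JVMCI-compiled nmethods record the barrier's code offset in their nmethod
    // data and attach a section_word relocation whose target is the guard word.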
    if (nm->is_compiled_by_jvmci()) {
      assert(alt_entry_instruction_address == 0, "invariant");
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = (alt_entry_instruction_address != 0) ? alt_entry_instruction_address :
          nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With c2 compiled code, the guard is out-of-line in a stub.
          // We find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }

  int get_value() {
    return AtomicAccess::load_acquire(guard_addr());
  }

  void set_value(int value, int bit_mask) {
    if (bit_mask == ~0) {
      AtomicAccess::release_store(guard_addr(), value);
      return;
    }
    assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
    value &= bit_mask;
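    // Only part of the guard changes: use a CAS loop so that concurrent updates
    // to bits outside the mask are not lost.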
    int old_value = AtomicAccess::load(guard_addr());
    while (true) {
      // Only bits in the mask are changed
      int new_value = value | (old_value & ~bit_mask);
      if (new_value == old_value) break;
      int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
      if (v == old_value) break;
      old_value = v;
    }
  }

  bool check_barrier(err_msg& msg) const;
  void verify() const {
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
  }
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
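  // A 32-bit LDR (literal) has opcode bits [31:24] == 0x18.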
  if ((inst & 0xff000000) != 0x18000000) {
    msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. "
              "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst);
    return false;
  }
  return true;
}


/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

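  // Patch the sp/fp/lr/pc words saved just below the barrier's return address,
  // so that unwinding discards the nmethod's frame and execution resumes at the
  // handle_wrong_method stub in the caller's context.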
  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

static void set_value(nmethod* nm, jint val, int bit_mask) {
  NativeNMethodBarrier cmp1 = NativeNMethodBarrier(nm);
  cmp1.set_value(val, bit_mask);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

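    // The barrier sits at the same distance from each entry point, so the offset
    // computed for the first barrier locates the others as well.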
    int barrier_offset = cmp1.instruction_address() - method_body;
    NativeNMethodBarrier cmp2 = NativeNMethodBarrier(nm, entry_point2 + barrier_offset);
    assert(cmp1.instruction_address() != cmp2.instruction_address(), "sanity");
    DEBUG_ONLY(cmp2.verify());
    cmp2.set_value(val, bit_mask);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodBarrier cmp3 = NativeNMethodBarrier(nm, nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1.instruction_address() != cmp3.instruction_address() && cmp2.instruction_address() != cmp3.instruction_address(), "sanity");
      DEBUG_ONLY(cmp3.verify());
      cmp3.set_value(val, bit_mask);
    }
  }
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entry into the nmethod until the requirement for further fencing by mutators
    // has been safely published; only then are they allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  set_value(nm, value, bit_mask);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif