/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

static int slow_path_size(nmethod* nm) {
  // The slow path code is out of line with C2
  return nm->is_compiled_by_c2() ? 0 : 6;
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry (where the entry
// barrier resides) and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does not
// find the expected native instruction at this offset, and the offset must be
// updated accordingly.
// Note that this offset is independent of PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
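  // Each case below counts the number of 4-byte AArch64 instructions between
  // the entry barrier and the frame completion point for the given patching
  // scheme, and returns that distance as a negative byte offset.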
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // It's the last instruction
    return (-entry_barrier_offset(nm)) - 4;
  }

public:
  NativeNMethodBarrier(nmethod* nm): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With C2 compiled code, the guard is out-of-line in a stub.
          // We find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }

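  // The guard is loaded with acquire and stored with release semantics, so
  // that a disarming store pairs with the loads performed by the nmethod
  // entry barrier (see BarrierSetNMethod::set_guard_value below).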
  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

  void set_value(int value) {
    Atomic::release_store(guard_addr(), value);
  }

  bool check_barrier(err_msg& msg) const;
  void verify() const {
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
  }
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
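  // LDR (literal, 32-bit) encodes 0b00011000 (0x18) in bits [31:24].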
  if ((inst & 0xff000000) != 0x18000000) {
    msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. "
              "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst);
    return false;
  }
  return true;
}


/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

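  // Overlay of the state saved below the return address slot by the entry
  // barrier stub; rewriting it below makes the stub resume in the caller's
  // frame at the wrong-method (ic_miss) stub instead of in the nmethod.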
  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

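  // Discard the nmethod's frame: resume in the caller's frame at the
  // wrong-method stub.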
  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed, and
    // disarming is performed with a release store. In the nmethod entry barrier,
    // the values are read in the opposite order, so that the load of the nmethod
    // guard acquires the patching epoch. This way, the guard keeps blocking
    // entries to the nmethod until the requirement for further fencing by
    // mutators has been safely published, before they are allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  NativeNMethodBarrier barrier(nm);
  barrier.set_value(value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif