/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

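// Number of 4-byte instructions occupied by the entry barrier's slow path when
// it is emitted inline; entry_barrier_offset() below scales this count by the
// instruction size.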
static int slow_path_size(nmethod* nm) {
  // The slow path code is out of line with C2
  return nm->is_compiled_by_c2() ? 0 : 6;
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does not
// find the expected native instruction at this offset, and the offset must be
// updated accordingly.
// Note that this offset is invariant of PreserveFramePointer.
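// The factor of 4 below is the AArch64 instruction size in bytes; the constants
// 4 and 10 count the fast-path instructions emitted for the respective patching
// variants.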
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

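// A handle on the entry barrier of an nmethod: the address of the barrier's
// first instruction (an ldr (literal), see check_barrier() below) and the
// address of the guard value that the barrier loads and compares.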
class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // It's the last instruction
    return (-entry_barrier_offset(nm)) - 4;
  }

public:
  NativeNMethodBarrier(nmethod* nm): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With C2-compiled code, the guard is out-of-line in a stub.
          // We find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }

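  // The guard is read with acquire semantics, pairing with the releasing
  // stores performed by set_value() below.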
  int get_value() {
    return AtomicAccess::load_acquire(guard_addr());
  }

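  // Updates the guard with release semantics. With a full mask this is a plain
  // release store; with a partial mask the new bits are merged in with a CAS
  // loop that leaves bits outside the mask untouched.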
  void set_value(int value, int bit_mask) {
    if (bit_mask == ~0) {
      AtomicAccess::release_store(guard_addr(), value);
      return;
    }
    assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
    value &= bit_mask;
    int old_value = AtomicAccess::load(guard_addr());
    while (true) {
      // Only bits in the mask are changed
      int new_value = value | (old_value & ~bit_mask);
      if (new_value == old_value) break;
      int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
      if (v == old_value) break;
      old_value = v;
    }
  }

  bool check_barrier(err_msg& msg) const;
  void verify() const {
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
  }
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
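// A 32-bit ldr (literal) has opc == 00 and V == 0 in the AArch64 encoding, so
// its top byte is 0x18, which is what the mask below tests for.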
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
  if ((inst & 0xff000000) != 0x18000000) {
    msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. "
              "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst);
    return false;
  }
  return true;
}


/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

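  // return_address_ptr points into the frame set up by the entry barrier
  // stub; frame_pointers_t is overlaid on the slots from which the stub
  // restores sp, fp, lr and pc, so the -5 offset below must stay in sync
  // with the stub's frame layout.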
  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to keep
    // blocking entries into the nmethod until the requirement for further fencing
    // has been safely published to mutators, before they are allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  NativeNMethodBarrier barrier(nm);
  barrier.set_value(value, bit_mask);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif