/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

static int slow_path_size(nmethod* nm) {
  // The slow path code is out of line with C2
  return nm->is_compiled_by_c2() ? 0 : 6;
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does
// not find the expected native instruction at this offset, and the offset
// must be updated accordingly.
// Note that this offset is invariant of PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}
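
// A worked example of the arithmetic above: with conc_data_patch and a non-C2
// nmethod, the inline slow path adds 6 instructions, so the barrier starts at
// -4 * (5 + 6) = -44 bytes before frame completion; with C2 the slow path is
// out of line in a stub, giving just -4 * 5 = -20 bytes.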

class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

public:
  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // It's the last instruction
    return (-entry_barrier_offset(nm)) - 4;
  }

  NativeNMethodBarrier(nmethod* nm, address alt_entry_instruction_address = 0): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      assert(alt_entry_instruction_address == 0, "invariant");
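      // JVMCI records the barrier's position via nmethod_entry_patch_offset
      // in the nmethod data, so the instruction address is derived from there
      // rather than from frame_complete_offset as in the branch below.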
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = (alt_entry_instruction_address != 0) ? alt_entry_instruction_address :
          nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With c2 compiled code, the guard is out-of-line in a stub.
          // We find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }

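  // The guard is read with acquire and written with release semantics; this
  // pairs with the patching epoch protocol described in
  // BarrierSetNMethod::set_guard_value() below.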
  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

  void set_value(int value) {
    Atomic::release_store(guard_addr(), value);
  }

  bool check_barrier(err_msg& msg) const;
  void verify() const {
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
  }
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
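  // 0x18 in bits 31..24 is the opcode of a 32-bit ldr (literal); the low 24
  // bits encode the 19-bit PC-relative offset and the destination register,
  // which are deliberately left unchecked here.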
  if ((inst & 0xff000000) != 0x18000000) {
    msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. "
              "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst);
    return false;
  }
  return true;
}

/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

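  // The barrier slow-path stub spilled a {sp, fp, lr, pc} quadruple below the
  // return address slot; return_address_ptr - 5 locates that spill area (this
  // offset must match the stub's frame layout). Overwriting it below makes the
  // stub unwind the nmethod's frame and "return" to the wrong-method stub.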
  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

static void set_value(nmethod* nm, jint val) {
  NativeNMethodBarrier cmp1 = NativeNMethodBarrier(nm);
  cmp1.set_value(val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each of
    // which has its own nmethod entry barrier.
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();
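    // C1 lays out the verified inline entry first, while C2 starts with the
    // standard verified entry; method_body is whichever entry begins the code,
    // so the barrier offset computed against it applies to the other entries.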

    int barrier_offset = cmp1.instruction_address() - method_body;
    NativeNMethodBarrier cmp2 = NativeNMethodBarrier(nm, entry_point2 + barrier_offset);
    assert(cmp1.instruction_address() != cmp2.instruction_address(), "sanity");
    debug_only(cmp2.verify());
    cmp2.set_value(val);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodBarrier cmp3 = NativeNMethodBarrier(nm, nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1.instruction_address() != cmp3.instruction_address() && cmp2.instruction_address() != cmp3.instruction_address(), "sanity");
      debug_only(cmp3.verify());
      cmp3.set_value(val);
    }
  }
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to keep
    // blocking entries to the nmethod until the requirement for further fencing
    // has been safely published to mutators, before they are allowed to enter.
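    // The matching acquire on the mutator side is the entry barrier's guard
    // load (mirrored by load_acquire in NativeNMethodBarrier::get_value()), so
    // a mutator that observes the disarmed guard also observes the new epoch.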
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  set_value(nm, value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif