/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

static int slow_path_size(nmethod* nm) {
  // With C2, the slow path code is out of line (in a stub), so it contributes
  // no instructions here; other compilers emit a 6-instruction inline slow path.
  return nm->is_compiled_by_c2() ? 0 : 6;
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does not
// find the expected native instruction at this offset, and the offset here
// needs updating.
// Note that this offset does not depend on PreserveFramePointer.
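// The offsets below are in bytes: each AArch64 instruction is 4 bytes, and the
// count in each case is the number of instructions between the start of the
// entry barrier and the frame-complete point for that patching scheme, plus
// the inline slow path (if any).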
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // It's the last instruction
    return (-entry_barrier_offset(nm)) - 4;
  }

public:
  NativeNMethodBarrier(nmethod* nm): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
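      // JVMCI-compiled nmethods record the guard location with a section_word
      // relocation at the entry patch offset, so look it up there.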
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With C2-compiled code, the guard is out of line in a stub.
          // We find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
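        // Otherwise the guard word is embedded at the end of the inline barrier code.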
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }

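  // The guard word is read with acquire semantics and written with release
  // semantics; see BarrierSetNMethod::set_guard_value() below for how this
  // pairs with the patching epoch update.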
  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

  void set_value(int value) {
    Atomic::release_store(guard_addr(), value);
  }

  bool check_barrier(err_msg& msg) const;
  void verify() const {
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
  }
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
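  // Bits [31:24] of an AArch64 ldr (literal) instruction loading a W register
  // encode to 0x18; anything else means the barrier is not where we expect it.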
  if ((inst & 0xff000000) != 0x18000000) {
    msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. "
              "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst);
    return false;
  }
  return true;
}


/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

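  // Rewrite the saved frame pointers so that returning from the barrier's
  // runtime stub unwinds the nmethod's frame and resumes in the
  // handle_wrong_method (ic_miss) stub instead.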
  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entry into the nmethod until the requirement for further fencing by
    // mutators has been safely published.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  NativeNMethodBarrier barrier(nm);
  barrier.set_value(value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif