/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static int slow_path_size(nmethod* nm) {
  // With C2 the slow path code is emitted out of line, so it adds no
  // instructions here; otherwise the inline slow path is 6 instructions.
  return nm->is_compiled_by_c2() ? 0 : 6;
}

// This is the offset of the entry barrier from where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does
// not find the expected native instruction at this offset, and the offset
// will need updating.
// Note that this offset is independent of PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
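  // Each case counts AArch64 instructions (4 bytes each) back from the
  // frame-complete point to the first instruction of the entry barrier.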
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

class NativeNMethodBarrier: public NativeInstruction {
  address instruction_address() const { return addr_at(0); }

  int local_guard_offset(nmethod* nm) {
    // It's the last instruction
    return (-entry_barrier_offset(nm)) - 4;
  }

  int *guard_addr(nmethod* nm) {
    if (nm->is_compiled_by_c2()) {
      // With c2 compiled code, the guard is out-of-line in a stub
      // We find it using the RelocIterator.
      RelocIterator iter(nm);
      while (iter.next()) {
        if (iter.type() == relocInfo::entry_guard_type) {
          entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
          return reinterpret_cast<int*>(reloc->addr());
        }
      }
      ShouldNotReachHere();
    }
    return reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
  }

public:
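  // The guard is read with acquire and written with release semantics so that
  // it pairs with the memory ordering performed by the nmethod entry barrier.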
  int get_value(nmethod* nm) {
    return Atomic::load_acquire(guard_addr(nm));
  }

  void set_value(nmethod* nm, int value) {
    Atomic::release_store(guard_addr(nm), value);
  }

  void verify() const;
};

// Store the instruction bitmask, bits and name for checking the barrier.
struct CheckInsn {
  uint32_t mask;
  uint32_t bits;
  const char *name;
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
void NativeNMethodBarrier::verify() const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
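  // The 32-bit ldr (literal) encoding has 0x18 in bits 31..24.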
  if ((inst & 0xff000000) != 0x18000000) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", (intptr_t)addr, inst);
    fatal("not an ldr (literal) instruction.");
  }
}


/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

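  // Overwriting these slots (filled in at the end of this function) makes
  // execution continue in the caller's frame at the handle_wrong_method stub
  // instead of re-entering the nmethod.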
  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
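  // The barrier sits at a fixed (negative) offset from the frame-complete
  // point; see entry_barrier_offset().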
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
  NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}

static void set_value(nmethod* nm, jint val) {
  NativeNMethodBarrier* cmp1 = native_nmethod_barrier(nm);
  cmp1->set_value(nm, val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

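    // The barriers are assumed to sit at the same offset from each entry
    // point; the other barriers are verified below in debug builds.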
    int barrier_offset = reinterpret_cast<address>(cmp1) - method_body;
    NativeNMethodBarrier* cmp2 = reinterpret_cast<NativeNMethodBarrier*>(entry_point2 + barrier_offset);
    assert(cmp1 != cmp2, "sanity");
    debug_only(cmp2->verify());
    cmp2->set_value(nm, val);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodBarrier* cmp3 = reinterpret_cast<NativeNMethodBarrier*>(nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1 != cmp3 && cmp2 != cmp3, "sanity");
      debug_only(cmp3->verify());
      cmp3->set_value(nm, val);
    }
  }
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entries to the nmethod until the requirement for further fencing by
    // mutators has been safely published; only then are they allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  set_value(nm, value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->get_value(nm);
}