/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

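// The nmethod entry barrier on x86 is a single cmp of a per-thread guard
// word against a 32-bit immediate baked into the code stream. As a sketch
// (the exact disp8 is the in-thread offset of the guard word, not spelled
// out here), the LP64 encoding the constants below describe is:
//
//   41 81 7f <disp8> <imm32>    cmp dword ptr [r15 + disp8], imm32
//
// That is: REX|REX_B prefix (0x41), opcode 0x81 with /7 in the mod/rm byte
// (0x7f selects [r15 + disp8]), making the instruction 8 bytes long with
// the immediate starting at byte offset 4 -- matching instruction_size and
// imm_offset below.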
class NativeNMethodCmpBarrier: public NativeInstruction {
public:
#ifdef _LP64
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 8,
    imm_offset              = 4,
    instruction_rex_prefix  = Assembler::REX | Assembler::REX_B,
    instruction_modrm       = 0x7f  // [r15 + offset]
  };
#else
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 7,
    imm_offset              = 2,
    instruction_modrm       = 0x3f  // [rdi]
  };
#endif

  address instruction_address() const { return addr_at(0); }
  address immediate_address() const { return addr_at(imm_offset); }

  jint get_immediate() const { return int_at(imm_offset); }
  void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
  bool check_barrier(err_msg& msg) const;
  void verify() const {
#ifdef ASSERT
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
#endif
  }
};

#ifdef _LP64
bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const {
  // Only require 4 byte alignment
  if (((uintptr_t) instruction_address()) & 0x3) {
    msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address()));
    return false;
  }

  int prefix = ubyte_at(0);
  if (prefix != instruction_rex_prefix) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), prefix, instruction_rex_prefix);
    return false;
  }

  int inst = ubyte_at(1);
  if (inst != instruction_code) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), inst, instruction_code);
    return false;
  }

  int modrm = ubyte_at(2);
  if (modrm != instruction_modrm) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected mod/rm 0x%x", p2i(instruction_address()), modrm, instruction_modrm);
    return false;
  }
  return true;
}
#else
bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const {
  if (((uintptr_t) instruction_address()) & 0x3) {
    msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address()));
    return false;
  }

  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()),
        inst, instruction_code);
    return false;
  }

  int modrm = ubyte_at(1);
  if (modrm != instruction_modrm) {
    msg.print("Addr: " INTPTR_FORMAT " mod/rm: 0x%x expected 0x%x", p2i(instruction_address()),
        modrm, instruction_modrm);
    return false;
  }
  return true;
}
#endif // _LP64

void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  /*
   * [ callers frame          ]
   * [ callers return address ] <- callers rsp
   * [ callers rbp            ] <- callers rbp
   * [ callers frame slots    ]
   * [ return_address         ] <- return_address_ptr
   * [ cookie                 ] <- used to write the new rsp (callers rsp)
   * [ stub rbp               ]
   * [ stub stuff             ]
   */

  address* stub_rbp = return_address_ptr - 2;
  address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
  address* callers_rbp = callers_rsp - 1; // the slot just below the callers return address holds the callers rbp
  address* cookie = return_address_ptr - 1;

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    JavaThread* jth = JavaThread::current();
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %p, return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                               nm, (address *) return_address_ptr, nm->is_osr_method(), jth,
                               jth->name(), callers_rsp, nm->verified_entry_point());
  }

  assert(nm->frame_size() >= 3, "invariant");
  assert(*cookie == (address) -1, "invariant");

  // Preserve caller rbp.
  *stub_rbp = *callers_rbp;

  // At the cookie address put the callers rsp.
  *cookie = (address) callers_rsp; // should point to the return address

  // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
  // Overwriting the caller rbp should be okay since our stub rbp has the same value.
  address* jmp_addr_ptr = callers_rbp;
  *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
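
  // From here (a sketch of the expected unwind, not spelled out in this
  // file): the entry barrier stub reads the cookie to restore the callers
  // rsp, restores the callers rbp we preserved above, and ends by jumping
  // through the rewritten slot, so control lands in the handle-wrong-method
  // stub as if the nmethod had never been entered.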
}

// This is the offset of the entry barrier relative to where the frame is
// completed. If any code changes between the end of the verified entry,
// where the entry barrier resides, and the completion of the frame, then
// NativeNMethodCmpBarrier::verify() will immediately complain that it does
// not find the expected native instruction at this offset -- in that case
// the offset here must be updated to match. Note that this offset is
// independent of PreserveFramePointer.
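// As a rough reading of the C2 value (an assumption, not something this
// file verifies): the 8-byte cmp described above, followed immediately by a
// 6-byte jcc to the slow path, gives 8 + 6 = 14 bytes from the start of the
// barrier to the frame-complete point, hence -14; the other compilers leave
// one extra byte between the barrier and that point, hence -15.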
static int entry_barrier_offset(nmethod* nm) {
#ifdef _LP64
  if (nm->is_compiled_by_c2()) {
    return -14;
  } else {
    return -15;
  }
#else
  return -18;
#endif
}

static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address;
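  // JVMCI-compiled nmethods record the barrier's offset explicitly at
  // install time; for the other compilers the barrier sits at a fixed
  // distance before the frame-complete point, computed by
  // entry_barrier_offset() above.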
#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    barrier_address = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
  } else
#endif
    {
      barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
    }

  NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
  barrier->verify();
  return barrier;
}

static void set_immediate(nmethod* nm, jint val) {
  NativeNMethodCmpBarrier* cmp1 = native_nmethod_barrier(nm);
  cmp1->set_immediate(val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

    int barrier_offset = reinterpret_cast<address>(cmp1) - method_body;
    NativeNMethodCmpBarrier* cmp2 = reinterpret_cast<NativeNMethodCmpBarrier*>(entry_point2 + barrier_offset);
    assert(cmp1 != cmp2, "sanity");
    debug_only(cmp2->verify());
    cmp2->set_immediate(val);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodCmpBarrier* cmp3 = reinterpret_cast<NativeNMethodCmpBarrier*>(nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1 != cmp3 && cmp2 != cmp3, "sanity");
      debug_only(cmp3->verify());
      cmp3->set_immediate(val);
    }
  }
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  set_immediate(nm, value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  return cmp->get_immediate();
}


#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodCmpBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->check_barrier(msg);
}
#endif