/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

class NativeNMethodCmpBarrier: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 8,
    imm_offset              = 4,
    instruction_rex_prefix  = Assembler::REX | Assembler::REX_B,
    instruction_modrm       = 0x7f  // [r15 + offset]
  };
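
  // Byte layout of the barrier instruction (8 bytes total), as implied by the
  // constants above:
  //   0x41 0x81 0x7f <disp8> <imm32>   cmpl $imm32, disp8(%r15)
  // byte 0:    REX.B prefix (0x41), needed to address r15
  // byte 1:    opcode 0x81 (cmp r/m32, imm32)
  // byte 2:    ModRM 0x7f (mod=01: disp8, reg=/7: cmp, rm=111: r15)
  // byte 3:    8-bit displacement of the thread-local guard field (r15 holds
  //            the current JavaThread)
  // bytes 4-7: 32-bit immediate guard value, hence imm_offset = 4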

  address instruction_address() const { return addr_at(0); }
  address immediate_address() const { return addr_at(imm_offset); }

  static NativeNMethodCmpBarrier* nativeNMethodCmpBarrier_at(address a) { return (NativeNMethodCmpBarrier*)a; }

  jint get_immediate() const { return int_at(imm_offset); }
  void set_immediate(jint imm, int bit_mask) {
    if (bit_mask == ~0) {
      set_int_at(imm_offset, imm);
      return;
    }

    assert((imm & ~bit_mask) == 0, "trying to set bits outside the mask");
    imm &= bit_mask;

    assert(align_up(immediate_address(), sizeof(jint)) ==
           align_down(immediate_address(), sizeof(jint)), "immediate not aligned");
    jint* data_addr = (jint*)immediate_address();
    jint old_value = AtomicAccess::load(data_addr);
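    // The CAS below publishes with release ordering; this is assumed to pair
    // with the guard load performed by the emitted entry barrier, so that a
    // thread observing the new guard value also observes the stores that
    // preceded it.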
    while (true) {
      // Only bits in the mask are changed
      jint new_value = imm | (old_value & ~bit_mask);
      if (new_value == old_value) break;
      jint v = AtomicAccess::cmpxchg(data_addr, old_value, new_value, memory_order_release);
      if (v == old_value) break;
      old_value = v;
    }
  }
  bool check_barrier(err_msg& msg) const;
  void verify() const {
#ifdef ASSERT
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
#endif
  }
};

bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const {
  // Only require 4 byte alignment
  if (((uintptr_t) instruction_address()) & 0x3) {
    msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address()));
    return false;
  }

  int prefix = ubyte_at(0);
  if (prefix != instruction_rex_prefix) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), prefix, instruction_rex_prefix);
    return false;
  }

  int inst = ubyte_at(1);
  if (inst != instruction_code) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), inst, instruction_code);
    return false;
  }

  int modrm = ubyte_at(2);
  if (modrm != instruction_modrm) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected mod/rm 0x%x", p2i(instruction_address()), modrm, instruction_modrm);
    return false;
  }
  return true;
}

void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  /*
   * [ callers frame          ]
   * [ callers return address ] <- callers rsp
   * [ callers rbp            ] <- callers rbp
   * [ callers frame slots    ]
   * [ return_address         ] <- return_address_ptr
   * [ cookie                 ] <- used to write the new rsp (callers rsp)
   * [ stub rbp               ]
   * [ stub stuff             ]
   */
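
  // The slots named above are rewritten in place: the cookie receives the
  // callers rsp, and the slot holding the callers rbp is redirected to the
  // handle_wrong_method stub, so unwinding the barrier stub resumes in the
  // runtime with the callers frame intact (see the stores below).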

  address* stub_rbp = return_address_ptr - 2;
  address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
  address* callers_rbp = callers_rsp - 1; // the slot just below the return address holds the callers saved rbp
  address* cookie = return_address_ptr - 1;

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    JavaThread* jth = JavaThread::current();
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %p, return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm, (address *) return_address_ptr, nm->is_osr_method(), jth,
                                jth->name(), callers_rsp, nm->verified_entry_point());
  }

  assert(nm->frame_size() >= 3, "invariant");
  assert(*cookie == (address) -1, "invariant");

  // Preserve caller rbp.
  *stub_rbp = *callers_rbp;

  // At the cookie address put the callers rsp.
  *cookie = (address) callers_rsp; // should point to the return address

  // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
  // Overwriting the caller rbp should be okay since our stub rbp has the same value.
  address* jmp_addr_ptr = callers_rbp;
  *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodCmpBarrier::verify() will immediately complain because it does
// not find the expected native instruction at this offset; the offset then
// needs updating.
// Note that this offset is invariant of PreserveFramePointer.
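//
// A sketch of the assumed code shape at the verified entry (for illustration
// only; the exact bytes come from the compilers' barrier emission):
//   cmpl   $imm32, disp8(%r15)   ; 8 bytes, the NativeNMethodCmpBarrier
//   jne    <slow path>           ; conditional jump to the barrier stub
// With C2 the cmp starts 14 bytes before the frame-complete offset; the C1
// code shape is assumed to differ by one byte, giving -15.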
static int entry_barrier_offset(nmethod* nm) {
  if (nm->is_compiled_by_c2()) {
    return -14;
  } else {
    return -15;
  }
}

static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address;
#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    barrier_address = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
  } else
#endif
    {
      barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
    }

  NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
  barrier->verify();
  return barrier;
}

static void set_immediate(nmethod* nm, jint val, int bit_mask) {
  NativeNMethodCmpBarrier* cmp1 = native_nmethod_barrier(nm);
  cmp1->set_immediate(val, bit_mask);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

    int barrier_offset = reinterpret_cast<address>(cmp1) - method_body;
    NativeNMethodCmpBarrier* cmp2 = reinterpret_cast<NativeNMethodCmpBarrier*>(entry_point2 + barrier_offset);
    assert(cmp1 != cmp2, "sanity");
    DEBUG_ONLY(cmp2->verify());
    cmp2->set_immediate(val, bit_mask);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodCmpBarrier* cmp3 = reinterpret_cast<NativeNMethodCmpBarrier*>(nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1 != cmp3 && cmp2 != cmp3, "sanity");
      DEBUG_ONLY(cmp3->verify());
      cmp3->set_immediate(val, bit_mask);
    }
  }
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  set_immediate(nm, value, bit_mask);
}
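
// Example call sites (hypothetical, for illustration only): a GC can disarm
// an nmethod by overwriting the whole guard word,
//   bs_nm->set_guard_value(nm, bs_nm->disarmed_guard_value(), ~0);
// or flip only selected bits while leaving the rest to concurrent updaters,
//   bs_nm->set_guard_value(nm, new_bits, chosen_bit_mask);
// where new_bits and chosen_bit_mask are placeholder names.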

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  return cmp->get_immediate();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodCmpBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->check_barrier(msg);
}
#endif