/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

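// Models the cmp instruction used by the nmethod entry barrier on x86_64:
//
//   cmp DWORD PTR [r15 + disp8], imm32
//
// encoded as REX.B (0x41), opcode 0x81 with the /7 (CMP) extension, ModRM 0x7f
// ([r15 + disp8]), a one-byte displacement and a 4-byte immediate, 8 bytes in
// total. r15 holds the current JavaThread, so the memory operand selects the
// thread-local disarmed guard value, while the immediate (at imm_offset) holds
// the nmethod's own guard value.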
class NativeNMethodCmpBarrier: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 8,
    imm_offset              = 4,
    instruction_rex_prefix  = Assembler::REX | Assembler::REX_B,
    instruction_modrm       = 0x7f  // [r15 + offset]
  };

  address instruction_address() const { return addr_at(0); }
  address immediate_address() const { return addr_at(imm_offset); }

  jint get_immediate() const { return int_at(imm_offset); }
  void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
  bool check_barrier(err_msg& msg) const;
  void verify() const {
#ifdef ASSERT
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
#endif
  }
};

bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const {
  // Only require 4 byte alignment
  if (((uintptr_t) instruction_address()) & 0x3) {
    msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address()));
    return false;
  }

  int prefix = ubyte_at(0);
  if (prefix != instruction_rex_prefix) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), prefix, instruction_rex_prefix);
    return false;
  }

  int inst = ubyte_at(1);
  if (inst != instruction_code) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), inst, instruction_code);
    return false;
  }

  int modrm = ubyte_at(2);
  if (modrm != instruction_modrm) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected mod/rm 0x%x", p2i(instruction_address()), modrm, instruction_modrm);
    return false;
  }
  return true;
}

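// Called on the slow path of the nmethod entry barrier when the nmethod may no
// longer be entered. The pointer arithmetic below rewires the stub's frame (see
// the layout sketch) so that returning from the barrier stub restores the
// caller's rbp and rsp and transfers control to
// SharedRuntime::get_handle_wrong_method_stub().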
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  /*
   * [ callers frame          ]
   * [ callers return address ] <- callers rsp
   * [ callers rbp            ] <- callers rbp
   * [ callers frame slots    ]
   * [ return_address         ] <- return_address_ptr
   * [ cookie                 ] <- used to write the new rsp (callers rsp)
   * [ stub rbp               ]
   * [ stub stuff             ]
   */

  address* stub_rbp = return_address_ptr - 2;
  address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
  address* callers_rbp = callers_rsp - 1; // the slot just below the return address holds the callers saved rbp
  address* cookie = return_address_ptr - 1;

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    JavaThread* jth = JavaThread::current();
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %p, return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                               nm, (address *) return_address_ptr, nm->is_osr_method(), jth,
                               jth->name(), callers_rsp, nm->verified_entry_point());
  }

  assert(nm->frame_size() >= 3, "invariant");
  assert(*cookie == (address) -1, "invariant");

  // Preserve caller rbp.
  *stub_rbp = *callers_rbp;

  // At the cookie address put the callers rsp.
  *cookie = (address) callers_rsp; // should point to the return address

  // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
  // Overwriting the caller rbp should be okay since our stub rbp has the same value.
  address* jmp_addr_ptr = callers_rbp;
  *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
}

// This is the offset of the entry barrier from where the frame is completed.
// If any code changes between the end of the verified entry where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodCmpBarrier::verify() will immediately complain when it does
// not find the expected native instruction at this offset, in which case the
// offset below needs to be updated.
// Note that this offset is invariant with respect to PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
  if (nm->is_compiled_by_c2()) {
    return -14;
  } else {
    return -15;
  }
}
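// For example, for a C2-compiled nmethod with frame_complete_offset() == F, the
// cmp instruction starts at code_begin() + F - 14 and its 32-bit guard immediate
// at code_begin() + F - 10 (imm_offset == 4).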

static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address;
#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    barrier_address = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
  } else
#endif
    {
      barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
    }

  NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
  barrier->verify();
  return barrier;
}

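// Arming and disarming an nmethod is done by patching the 32-bit immediate of
// the cmp in place. check_barrier() requires the instruction to be 4-byte
// aligned, so the immediate at imm_offset (4) is itself naturally aligned and
// the patch below is a single aligned 4-byte store that concurrently executing
// threads observe atomically.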
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  cmp->set_immediate(value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  return cmp->get_immediate();
}


#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodCmpBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->check_barrier(msg);
}
#endif