/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

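// The nmethod entry barrier is a cmp instruction emitted in the verified entry,
// right after the frame is set up, that compares a per-thread guard location
// against a 32-bit immediate baked into the code. NativeNMethodCmpBarrier overlays
// that instruction so the immediate (the guard value) can be read and patched,
// and so the expected encoding can be sanity-checked.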
class NativeNMethodCmpBarrier: public NativeInstruction {
public:
#ifdef _LP64
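  // In the LP64 build the barrier instruction is cmp dword ptr [r15 + disp8], imm32:
  // a REX.B prefix (0x41), opcode 0x81 with the /7 (cmp) extension encoded in the
  // ModRM byte 0x7f, one displacement byte, and a four-byte immediate, for eight
  // bytes in total with the immediate at offset 4.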
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 8,
    imm_offset              = 4,
    instruction_rex_prefix  = Assembler::REX | Assembler::REX_B,
    instruction_modrm       = 0x7f  // [r15 + offset]
  };
#else
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 7,
    imm_offset              = 2,
    instruction_modrm       = 0x3f  // [rdi]
  };
#endif

  address instruction_address() const { return addr_at(0); }
  address immediate_address() const { return addr_at(imm_offset); }

  jint get_immediate() const { return int_at(imm_offset); }
  void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
  bool check_barrier(err_msg& msg) const;
  void verify() const {
#ifdef ASSERT
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
#endif
  }
};

#ifdef _LP64
bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const {
  // Only require 4 byte alignment
  if (((uintptr_t) instruction_address()) & 0x3) {
    msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address()));
    return false;
  }

  int prefix = ubyte_at(0);
  if (prefix != instruction_rex_prefix) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), prefix, instruction_rex_prefix);
    return false;
  }

  int inst = ubyte_at(1);
  if (inst != instruction_code) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), inst, instruction_code);
    return false;
  }

  int modrm = ubyte_at(2);
  if (modrm != instruction_modrm) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected mod/rm 0x%x", p2i(instruction_address()), modrm, instruction_modrm);
    return false;
  }
  return true;
}
#else
bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const {
  if (((uintptr_t) instruction_address()) & 0x3) {
    msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address()));
    return false;
  }

  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
        inst);
    return false;
  }

  int modrm = ubyte_at(1);
  if (modrm != instruction_modrm) {
    msg.print("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
        modrm);
    return false;
  }
  return true;
}
#endif // _LP64

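// Called when the entry barrier decides the nmethod may not be entered. The barrier
// stub's frame sits on top of the nmethod's not-yet-used frame (see the diagram
// below). Rewire the saved slots so that, when the stub unwinds, rsp and rbp are
// restored to the caller's values and control transfers to the handle_wrong_method
// stub instead of the nmethod body.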
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  /*
   * [ callers frame          ]
   * [ callers return address ] <- callers rsp
   * [ callers rbp            ] <- callers rbp
   * [ callers frame slots    ]
   * [ return_address         ] <- return_address_ptr
   * [ cookie ]                 <- used to write the new rsp (callers rsp)
   * [ stub rbp ]
   * [ stub stuff             ]
   */

  address* stub_rbp = return_address_ptr - 2;
  address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
  address* callers_rbp = callers_rsp - 1; // the slot with the callers saved rbp is one word below the callers return address
  address* cookie = return_address_ptr - 1;

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    JavaThread* jth = JavaThread::current();
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %p, return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                               nm, (address *) return_address_ptr, nm->is_osr_method(), jth,
                               jth->name(), callers_rsp, nm->verified_entry_point());
  }

  assert(nm->frame_size() >= 3, "invariant");
  assert(*cookie == (address) -1, "invariant");

  // Preserve caller rbp.
  *stub_rbp = *callers_rbp;

  // At the cookie address put the callers rsp.
  *cookie = (address) callers_rsp; // should point to the return address

  // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
  // Overwriting the caller rbp should be okay since our stub rbp has the same value.
  address* jmp_addr_ptr = callers_rbp;
  *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, this offset needs updating;
// NativeNMethodCmpBarrier::verify() will immediately complain when it does not
// find the expected native instruction at the stale offset.
// Note that this offset is invariant under PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
#ifdef _LP64
  if (nm->is_compiled_by_c2()) {
    return -14;
  } else {
    return -15;
  }
#else
  return -18;
#endif
}

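// Locates the barrier instruction in an nmethod. JVMCI-compiled code records the
// barrier's position explicitly via nmethod_entry_patch_offset(); C1/C2 code places
// the barrier at a fixed, compiler-specific distance before the frame-complete
// point (see entry_barrier_offset above).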
static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address;
#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    barrier_address = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
  } else
#endif
    {
      barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
    }

  NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
  barrier->verify();
  return barrier;
}

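// Update the guard value in every copy of the entry barrier. All copies sit at the
// same offset from their respective verified entry points, so once the offset of the
// first barrier is known the others can be located from the remaining entry points.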
static void set_immediate(nmethod* nm, jint val) {
  NativeNMethodCmpBarrier* cmp1 = native_nmethod_barrier(nm);
  cmp1->set_immediate(val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

    int barrier_offset = reinterpret_cast<address>(cmp1) - method_body;
    NativeNMethodCmpBarrier* cmp2 = reinterpret_cast<NativeNMethodCmpBarrier*>(entry_point2 + barrier_offset);
    assert(cmp1 != cmp2, "sanity");
    debug_only(cmp2->verify());
    cmp2->set_immediate(val);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodCmpBarrier* cmp3 = reinterpret_cast<NativeNMethodCmpBarrier*>(nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1 != cmp3 && cmp2 != cmp3, "sanity");
      debug_only(cmp3->verify());
      cmp3->set_immediate(val);
    }
  }
}

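// Arming and disarming an nmethod amounts to patching the immediate that the entry
// barrier compares against. nmethods without an entry barrier are always treated as
// disarmed.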
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  set_immediate(nm, value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  return cmp->get_immediate();
}


#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodCmpBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->check_barrier(msg);
}
#endif