/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

class NativeNMethodCmpBarrier: public NativeInstruction {
public:
#ifdef _LP64
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 8,
    imm_offset              = 4,
    instruction_rex_prefix  = Assembler::REX | Assembler::REX_B,
    instruction_modrm       = 0x7f  // [r15 + offset]
  };
#else
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 7,
    imm_offset              = 2,
    instruction_modrm       = 0x3f  // [rdi]
  };
#endif

  address instruction_address() const { return addr_at(0); }
  address immediate_address() const { return addr_at(imm_offset); }

  jint get_immediate() const { return int_at(imm_offset); }
  void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
  void verify() const;
};
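
// For reference, a sketch of the instruction the LP64 constants above
// describe (the disp8 placeholder stands for the per-thread guard-value
// offset; the exact operands depend on how the barrier is emitted):
//
//   41 81 7f <disp8> <imm32>    cmp dword ptr [r15 + disp8], imm32
//
// The REX prefix sits at offset 0, the 0x81 opcode at offset 1, the ModRM
// byte (0x7f) at offset 2, and the 32-bit immediate at imm_offset (4),
// which accounts for the 8-byte instruction_size.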

#ifdef _LP64
void NativeNMethodCmpBarrier::verify() const {
  if (((uintptr_t) instruction_address()) & 0x3) {
    fatal("Not properly aligned");
  }

  int prefix = ubyte_at(0);
  if (prefix != instruction_rex_prefix) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Prefix: 0x%x", p2i(instruction_address()),
        prefix);
    fatal("not a cmp barrier");
  }

  int inst = ubyte_at(1);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
        inst);
    fatal("not a cmp barrier");
  }

  int modrm = ubyte_at(2);
  if (modrm != instruction_modrm) {
    tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
        modrm);
    fatal("not a cmp barrier");
  }
}
#else
void NativeNMethodCmpBarrier::verify() const {
  if (((uintptr_t) instruction_address()) & 0x3) {
    fatal("Not properly aligned");
  }

  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
        inst);
    fatal("not a cmp barrier");
  }

  int modrm = ubyte_at(1);
  if (modrm != instruction_modrm) {
    tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
        modrm);
    fatal("not a cmp barrier");
  }
}
#endif // _LP64

void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  /*
   * [ callers frame          ]
   * [ callers return address ] <- callers rsp
   * [ callers rbp            ] <- callers rbp
   * [ callers frame slots    ]
   * [ return_address         ] <- return_address_ptr
   * [ cookie                 ] <- used to write the new rsp (callers rsp)
   * [ stub rbp               ]
   * [ stub stuff             ]
   */
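  // A worked sketch of the pointer computations below (hypothetical numbers,
  // assuming LP64 with 8-byte stack slots and nm->frame_size() == 4): if
  // return_address_ptr == 0x1000, then
  //   stub_rbp    == 0x0ff0  (return_address_ptr - 2)
  //   cookie      == 0x0ff8  (return_address_ptr - 1)
  //   callers_rsp == 0x1020  (return_address_ptr + 4), the callers return address
  //   callers_rbp == 0x1018  (one slot below the callers return address)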

  address* stub_rbp = return_address_ptr - 2;
  address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
  address* callers_rbp = callers_rsp - 1; // the saved rbp sits one slot below the callers return address
  address* cookie = return_address_ptr - 1;

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    JavaThread* jth = JavaThread::current();
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %p, return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm, return_address_ptr, nm->is_osr_method(), jth,
                                jth->name(), callers_rsp, nm->verified_entry_point());
  }

  assert(nm->frame_size() >= 3, "invariant");
  assert(*cookie == (address) -1, "invariant");

  // Preserve the callers rbp.
  *stub_rbp = *callers_rbp;

  // At the cookie address put the callers rsp.
  *cookie = (address) callers_rsp; // should point to the return address

  // In the slot that used to hold the callers rbp, put the address our stub
  // must jump to when it is done. Overwriting the callers rbp is safe because
  // the stub rbp holds the same value.
  address* jmp_addr_ptr = callers_rbp;
  *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
}

// This is the offset of the entry barrier from where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodCmpBarrier::verify() will immediately complain that it does
// not find the expected native instruction at this offset, and the offset
// must be updated accordingly.
// Note that this offset is invariant of PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
#ifdef _LP64
  if (nm->is_compiled_by_c2()) {
    return -14;
  } else {
    return -15;
  }
#else
  return -18;
#endif
}
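
// A sketch of where the C2 LP64 value comes from (an assumption based on the
// instruction constants above, not a normative statement about every code
// generator): the barrier is the 8-byte cmp described by
// NativeNMethodCmpBarrier followed by a 6-byte jcc rel32, so the cmp begins
// 14 bytes before the frame-complete point when the frame is completed
// directly after the conditional jump. The C1 value (-15) would then imply
// one extra byte between the cmp and the frame-complete point in C1 code.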

static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
  NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  cmp->set_immediate(value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  return cmp->get_immediate();
}
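
// A minimal usage sketch (illustrative only; MyBarrierSetNMethod is a
// hypothetical subclass, not part of this file): a GC arms an nmethod by
// writing an armed value into the barrier's cmp immediate, and disarms it
// by writing the disarmed value back so the entry-barrier compare succeeds
// and the slow path is skipped:
//
//   void MyBarrierSetNMethod::arm(nmethod* nm, int arm_value) {
//     set_guard_value(nm, arm_value);  // rewrites the imm32 of the barrier cmp
//   }
//
//   void MyBarrierSetNMethod::disarm(nmethod* nm) {
//     set_guard_value(nm, disarmed_guard_value());
//   }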