/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

class NativeNMethodCmpBarrier: public NativeInstruction {
public:
#ifdef _LP64
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 8,
    imm_offset              = 4,
    instruction_rex_prefix  = Assembler::REX | Assembler::REX_B,
    instruction_modrm       = 0x7f  // [r15 + offset]
  };
#else
  enum Intel_specific_constants {
    instruction_code        = 0x81,
    instruction_size        = 7,
    imm_offset              = 2,
    instruction_modrm       = 0x3f  // [rdi]
  };
#endif

  address instruction_address() const { return addr_at(0); }
  address immediate_address() const { return addr_at(imm_offset); }

  jint get_immediate() const { return int_at(imm_offset); }
  void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
  void verify() const;
};
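
// For reference, the byte layout this class expects on x86_64 is sketched
// below (the 32-bit variant is analogous, minus the REX prefix). This is an
// illustration derived from the constants above, not emitted code:
//
//   0x41 0x81 0x7f <disp8> <imm32 at imm_offset == 4>
//    |    |    |      |
//    |    |    |      +-- 8-bit displacement of the disarmed-value field
//    |    |    +--------- mod/rm 0x7f: mod=01, reg=/7 (CMP), rm=r15
//    |    +-------------- opcode 0x81: cmp r/m32, imm32
//    +------------------- REX | REX_B prefix
//
// i.e. "cmp dword ptr [r15 + disp8], imm32", 8 bytes in total, matching
// instruction_size above.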

#ifdef _LP64
void NativeNMethodCmpBarrier::verify() const {
  if (((uintptr_t) instruction_address()) & 0x7) {
    fatal("Not properly aligned");
  }

  int prefix = ubyte_at(0);
  if (prefix != instruction_rex_prefix) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Prefix: 0x%x", p2i(instruction_address()),
        prefix);
    fatal("not a cmp barrier");
  }

  int inst = ubyte_at(1);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
        inst);
    fatal("not a cmp barrier");
  }

  int modrm = ubyte_at(2);
  if (modrm != instruction_modrm) {
    tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
        modrm);
    fatal("not a cmp barrier");
  }
}
#else
void NativeNMethodCmpBarrier::verify() const {
  if (((uintptr_t) instruction_address()) & 0x3) {
    fatal("Not properly aligned");
  }

  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
        inst);
    fatal("not a cmp barrier");
  }

  int modrm = ubyte_at(1);
  if (modrm != instruction_modrm) {
    tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
        modrm);
    fatal("not a cmp barrier");
  }
}
#endif // _LP64

void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  /*
   * [ callers frame          ]
   * [ callers return address ] <- callers rsp
   * [ callers rbp            ] <- callers rbp
   * [ callers frame slots    ]
   * [ return_address         ] <- return_address_ptr
   * [ cookie                 ] <- used to write the new rsp (callers rsp)
   * [ stub rbp               ]
   * [ stub stuff             ]
   */

  address* stub_rbp = return_address_ptr - 2;
  address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
  address* callers_rbp = callers_rsp - 1; // the callers rbp sits one slot below the callers return address
  address* cookie = return_address_ptr - 1;

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    JavaThread* jth = JavaThread::current();
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %p, return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                               nm, (address *) return_address_ptr, nm->is_osr_method(), jth,
                               jth->name(), callers_rsp, nm->verified_entry_point());
  }

  assert(nm->frame_size() >= 3, "invariant");
  assert(*cookie == (address) -1, "invariant");

  // Preserve caller rbp.
  *stub_rbp = *callers_rbp;

  // At the cookie address put the callers rsp.
  *cookie = (address) callers_rsp; // should point to the return address

  // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
  // Overwriting the caller rbp should be okay since our stub rbp has the same value.
  address* jmp_addr_ptr = callers_rbp;
  *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
}
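
// The values planted above are consumed by the method entry barrier stub
// (generate_method_entry_barrier in the x86 stub generator). When the
// runtime call there requests deoptimization, the stub unwinds roughly as
// sketched below; this is a hedged outline, not the stub's exact code:
//
//   mov rsp, [cookie slot]   ; rsp now points at the callers return address
//   jmp [rsp - wordSize]     ; handle_wrong_method stub planted in the old
//                            ; callers rbp slot above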

// This is the offset of the entry barrier from where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodCmpBarrier::verify() will immediately complain that it does
// not find the expected native instruction at this offset, and the offset
// must be updated accordingly.
// Note that this offset is invariant of PreserveFramePointer.
static const int entry_barrier_offset = LP64_ONLY(-19) NOT_LP64(-18);
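
// For orientation, the x86_64 barrier sequence at the verified entry looks
// roughly like the sketch below (see BarrierSetAssembler::nmethod_entry_barrier
// for the code that actually emits it):
//
//   cmp dword ptr [r15 + <disarmed offset>], <imm32>  ; 8 bytes, 8-aligned
//   je  continuation                                  ; 6 bytes (rel32 form)
//   call <method entry barrier stub>                  ; 5 bytes
// continuation:
//
// If the frame-complete point directly follows the barrier, 8 + 6 + 5 = 19
// bytes is exactly the LP64 distance encoded above.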

static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
  NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}

void BarrierSetNMethod::disarm(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  cmp->set_immediate(disarmed_value());
}

void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  cmp->set_immediate(arm_value);
}

bool BarrierSetNMethod::is_armed(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return false;
  }

  NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
  return (disarmed_value() != cmp->get_immediate());
}