/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

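// Note: an auipc-based pair gives a 32-bit signed pc-relative reach
// (auipc supplies the upper 20 bits, the paired instruction the lower
// 12 bits), i.e. roughly +-2 GiB around the instruction.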
bool NativeInstruction::is_pc_relative_at(address instr) {
  // auipc + jalr
  // auipc + addi
  // auipc + load
  // auipc + float_load
  if ((is_auipc_at(instr)) &&
      (is_addi_at(instr + 4) || is_jalr_at(instr + 4) || is_load_at(instr + 4) || is_float_load_at(instr + 4)) &&
      check_pc_relative_data_dependency(instr)) {
    return true;
  }
  return false;
}

// i.e. ld(Rd, Label)
bool NativeInstruction::is_load_pc_relative_at(address instr) {
  if (is_auipc_at(instr) && // auipc
      is_ld_at(instr + 4) && // ld
      check_load_pc_relative_data_dependency(instr)) {
    return true;
  }
  return false;
}

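// The movptr pattern below materializes a wide (48-bit) address constant with
// a lui/addi chain interleaved with shifts; the final addi/jalr/load both
// supplies the lowest bits and performs the actual use. The exact bit split
// per instruction is defined by MacroAssembler::movptr_with_offset.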
bool NativeInstruction::is_movptr_at(address instr) {
  if (is_lui_at(instr) && // lui
      is_addi_at(instr + 4) && // addi
      is_slli_shift_at(instr + 8, 11) && // slli Rd, Rs, 11
      is_addi_at(instr + 12) && // addi
      is_slli_shift_at(instr + 16, 5) && // slli Rd, Rs, 5
      (is_addi_at(instr + 20) || is_jalr_at(instr + 20) || is_load_at(instr + 20)) && // addi/jalr/load
      check_movptr_data_dependency(instr)) {
    return true;
  }
  return false;
}

bool NativeInstruction::is_li32_at(address instr) {
  if (is_lui_at(instr) && // lui
      is_addiw_at(instr + 4) && // addiw
      check_li32_data_dependency(instr)) {
    return true;
  }
  return false;
}

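// li64 loads an arbitrary 64-bit immediate: lui plus the first addi supply
// the upper 32 bits, and the remaining addi steps (separated by shifts of
// 12, 12 and 8 bits) fill in the lower 32 bits.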
bool NativeInstruction::is_li64_at(address instr) {
  if (is_lui_at(instr) && // lui
      is_addi_at(instr + 4) && // addi
      is_slli_shift_at(instr + 8, 12) && // slli Rd, Rs, 12
      is_addi_at(instr + 12) && // addi
      is_slli_shift_at(instr + 16, 12) && // slli Rd, Rs, 12
      is_addi_at(instr + 20) && // addi
      is_slli_shift_at(instr + 24, 8) && // slli Rd, Rs, 8
      is_addi_at(instr + 28) && // addi
      check_li64_data_dependency(instr)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

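// A direct call is a single jal, whose signed offset reaches only about
// +-1 MiB. Calls to targets farther away branch to a trampoline stub in the
// nmethod's stub section, which holds the real 64-bit destination;
// destination() looks through that indirection.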
address NativeCall::destination() const {
  address addr = (address)this;
  assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal.");
  address destination = MacroAssembler::target_addr_for_insn(instruction_address());

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert(!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  guarantee(trampoline_stub_addr != NULL, "we need a trampoline");
  set_destination(trampoline_stub_addr);

  ICache::invalidate_range(addr_call, instruction_size);
}

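// Locate the trampoline stub that belongs to this call: either the jal
// already branches into a trampoline inside the containing code blob, or
// the stub is found through the call site's trampoline_stub relocation.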
address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address jal_destination = MacroAssembler::pd_call_destination(call_addr);
  if (code != NULL && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
    return jal_destination;
  }

  if (code != NULL && code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (!(nativeInstruction_at(instruction_address())->is_movptr() ||
        is_auipc_at(instruction_address()))) {
    fatal("should be MOVPTR or AUIPC");
  }
}

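// If the instruction is an auipc/ld pair that references the constant pool
// (maybe_cpool_ref), the value lives in memory and is read or written there;
// otherwise the constant is encoded directly in the movptr instruction stream.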
intptr_t NativeMovConstReg::data() const {
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction_size(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), movptr_instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  if (cb != NULL) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      RelocIterator iter(nm, instruction_address(), next_instruction_address());
      while (iter.next()) {
        if (iter.type() == relocInfo::oop_type) {
          oop* oop_addr = iter.oop_reloc()->oop_addr();
          *oop_addr = cast_to_oop(x);
          break;
        } else if (iter.type() == relocInfo::metadata_type) {
          Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
          *metadata_addr = (Metadata*)x;
          break;
        }
      }
    }
  } else {
    ShouldNotReachHere();
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::offset() const {
  Unimplemented();
  return 0;
}

void NativeMovRegMem::set_offset(int x) { Unimplemented(); }

void NativeMovRegMem::verify() {
  Unimplemented();
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use the sequence movptr_with_offset(r, 0),
  // jalr(r, 0), i.e. a jump to 0, when we need to leave space for a wide
  // immediate load.

  // Return -1 if the jump is to self or to 0.
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use a jump to 0 when first generating
  // a general jump.

  // Return -1 if the jump is to self or to 0.
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

//-------------------------------------------------------------------

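// A safepoint poll on this port is a lwu whose destination register is zr
// (x0): the loaded value is discarded, and bringing the thread to a
// safepoint relies on the poll faulting when the polling page is armed.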
bool NativeInstruction::is_safepoint_poll() {
  return is_lwu_to_zr(address(this));
}

bool NativeInstruction::is_lwu_to_zr(address instr) {
  assert_cond(instr != NULL);
  unsigned insn = *(unsigned*)instr;
  return (Assembler::extract(insn, 6, 0) == 0b0000011 &&  // opcode LOAD
          Assembler::extract(insn, 14, 12) == 0b110 &&    // funct3 = lwu
          Assembler::extract(insn, 11, 7) == 0b00000);    // rd = zr
}

// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  // jvmci
  return uint_at(0) == 0xffffffff;
}

void NativeIllegalInstruction::insert(address code_pos) {
  assert_cond(code_pos != NULL);
  *(juint*)code_pos = 0xffffffff; // all bits ones is permanently reserved as an illegal instruction
}

bool NativeInstruction::is_stop() {
  return uint_at(0) == 0xffffffff; // an illegal instruction
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
         nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "riscv64 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t offset = dest - verified_entry;
    guarantee(is_imm_in_range(offset, 20, 1), "offset is too large to be patched in one jal instruction."); // 1M

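    // Assemble a jal x0, offset by hand. The J-type immediate is scattered
    // over the instruction word as imm[20|10:1|11|19:12] in bits 31:12,
    // which is what the individual patch calls below reproduce.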
    uint32_t insn = 0;
    address pInsn = (address)&insn;
    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
    Assembler::patch(pInsn, 11, 7, 0); // zero, no link jump
    Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset)
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

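// Emit movptr_with_offset(t0, entry) followed by jalr(x0, t0, offset): unlike
// a single jal, this sequence can reach an arbitrary (48-bit) destination, at
// the cost of clobbering t0.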
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  int32_t offset = 0;
  a.movptr_with_offset(t0, entry, offset); // lui, addi, slli, addi, slli
  a.jalr(x0, t0, offset); // jalr

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}


address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

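// The fence instruction keeps its predecessor set in bits 27:24 and its
// successor set in bits 23:20; get_kind/set_kind translate between those
// fields and HotSpot's membar mask.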
uint32_t NativeMembar::get_kind() {
  uint32_t insn = uint_at(0);

  uint32_t predecessor = Assembler::extract(insn, 27, 24);
  uint32_t successor = Assembler::extract(insn, 23, 20);

  return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor);
}

void NativeMembar::set_kind(uint32_t order_kind) {
  uint32_t predecessor = 0;
  uint32_t successor = 0;

  MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor);

  uint32_t insn = uint_at(0);
  address pInsn = (address) &insn;
  Assembler::patch(pInsn, 27, 24, predecessor);
  Assembler::patch(pInsn, 23, 20, successor);

  address membar = addr_at(0);
  *(unsigned int*) membar = insn;
}