/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

Register NativeInstruction::extract_rs1(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 19, 15));
}

Register NativeInstruction::extract_rs2(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 24, 20));
}

Register NativeInstruction::extract_rd(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 11, 7));
}

uint32_t NativeInstruction::extract_opcode(address instr) {
  assert_cond(instr != NULL);
  return Assembler::extract(((unsigned*)instr)[0], 6, 0);
}

uint32_t NativeInstruction::extract_funct3(address instr) {
  assert_cond(instr != NULL);
  return Assembler::extract(((unsigned*)instr)[0], 14, 12);
}

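// Explanatory note: besides matching opcodes, each pattern check below also
// requires the expected register data dependency between the instructions
// (see the check_*_data_dependency() helpers), so an unrelated instruction
// sequence is not misidentified as one of these patterns.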
bool NativeInstruction::is_pc_relative_at(address instr) {
  // auipc + jalr
  // auipc + addi
  // auipc + load
  // auipc + float_load
  return (is_auipc_at(instr)) &&
         (is_addi_at(instr + instruction_size) ||
          is_jalr_at(instr + instruction_size) ||
          is_load_at(instr + instruction_size) ||
          is_float_load_at(instr + instruction_size)) &&
         check_pc_relative_data_dependency(instr);
}

// i.e. ld(Rd, Label)
bool NativeInstruction::is_load_pc_relative_at(address instr) {
  return is_auipc_at(instr) && // auipc
         is_ld_at(instr + instruction_size) && // ld
         check_load_pc_relative_data_dependency(instr);
}

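// Explanatory note: a movptr sequence materializes a 48-bit address in chunks
// (see MacroAssembler::movptr_with_offset):
//   lui + addi                 bits [47:16]
//   slli 11 + addi             bits [15:5]
//   slli 5 + addi/jalr/load    bits [4:0]
// The final instruction may consume the low bits directly as an addi, a jalr
// offset, or a load offset.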
bool NativeInstruction::is_movptr_at(address instr) {
  return is_lui_at(instr) && // Lui
         is_addi_at(instr + instruction_size) && // Addi
         is_slli_shift_at(instr + instruction_size * 2, 11) && // Slli Rd, Rs, 11
         is_addi_at(instr + instruction_size * 3) && // Addi
         is_slli_shift_at(instr + instruction_size * 4, 5) && // Slli Rd, Rs, 5
         (is_addi_at(instr + instruction_size * 5) ||
          is_jalr_at(instr + instruction_size * 5) ||
          is_load_at(instr + instruction_size * 5)) && // Addi/Jalr/Load
         check_movptr_data_dependency(instr);
}

bool NativeInstruction::is_li32_at(address instr) {
  return is_lui_at(instr) && // lui
         is_addiw_at(instr + instruction_size) && // addiw
         check_li32_data_dependency(instr);
}

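// Explanatory note: a li64 sequence builds a full 64-bit constant; the split
// mirrors the shift amounts checked below:
//   lui + addi        bits [63:32]
//   slli 12 + addi    bits [31:20]
//   slli 12 + addi    bits [19:8]
//   slli 8 + addi     bits [7:0]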
bool NativeInstruction::is_li64_at(address instr) {
  return is_lui_at(instr) && // lui
         is_addi_at(instr + instruction_size) && // addi
         is_slli_shift_at(instr + instruction_size * 2, 12) &&  // Slli Rd, Rs, 12
         is_addi_at(instr + instruction_size * 3) && // addi
         is_slli_shift_at(instr + instruction_size * 4, 12) &&  // Slli Rd, Rs, 12
         is_addi_at(instr + instruction_size * 5) && // addi
         is_slli_shift_at(instr + instruction_size * 6, 8) &&   // Slli Rd, Rs, 8
         is_addi_at(instr + instruction_size * 7) && // addi
         check_li64_data_dependency(instr);
}

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

address NativeCall::destination() const {
  address addr = (address)this;
  assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal.");
  address destination = MacroAssembler::target_addr_for_insn(instruction_address());

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// The assert_lock parameter switches off the assertion during code
// generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

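  // Explanatory note on the ordering below: the trampoline's destination word
  // is patched before the branch itself, so a thread that has already taken
  // the branch to the trampoline always loads a valid target, old or new.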
  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert(!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert(trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address jal_destination = MacroAssembler::pd_call_destination(call_addr);
  if (code != NULL && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
    return jal_destination;
  }

  if (code != NULL && code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (!(nativeInstruction_at(instruction_address())->is_movptr() ||
        is_auipc_at(instruction_address()))) {
    fatal("should be MOVPTR or AUIPC");
  }
}

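// Explanatory note: if the constant is a constant-pool reference (an
// auipc-based pc-relative sequence), target_addr_for_insn() yields the
// address of the pool slot and the value lives in memory; otherwise the
// value is encoded directly in the movptr instruction stream.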
intptr_t NativeMovConstReg::data() const {
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction_size(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), movptr_instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in the oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::offset() const {
  Unimplemented();
  return 0;
}

void NativeMovRegMem::set_offset(int x) { Unimplemented(); }

void NativeMovRegMem::verify() {
  Unimplemented();
}

//--------------------------------------------------------------------------------


void NativeJump::verify() { }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  // As a special case we also use the sequence movptr_with_offset(r, 0), jalr(r, 0)
  // i.e. jump to 0 when we need to leave space for a wide immediate
  // load

  // return -1 if jump to self or to 0
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  // As a special case we also use jump to 0 when first generating
  // a general jump

  // return -1 if jump to self or to 0
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  return is_lwu_to_zr(address(this));
}

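// Explanatory note: a safepoint poll is "lwu zr, offset(rs1)": opcode LOAD
// (0b0000011) with funct3 0b110 selects lwu, and rd == x0 discards the loaded
// value, so the instruction exists only to touch the polling word/page.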
bool NativeInstruction::is_lwu_to_zr(address instr) {
  assert_cond(instr != NULL);
  return (extract_opcode(instr) == 0b0000011 &&
          extract_funct3(instr) == 0b110 &&
          extract_rd(instr) == zr);         // zr
}

// The RISC-V ISA reserves the all-ones encoding as a permanently illegal instruction.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  // jvmci
  return uint_at(0) == 0xffffffff;
}

void NativeIllegalInstruction::insert(address code_pos) {
  assert_cond(code_pos != NULL);
  *(juint*)code_pos = 0xffffffff; // the all-ones pattern is permanently reserved as an illegal instruction
}

bool NativeInstruction::is_stop() {
  return uint_at(0) == 0xffffffff; // an illegal instruction
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
         nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "riscv cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t offset = dest - verified_entry;
    guarantee(is_imm_in_range(offset, 20, 1), "offset is too large to be patched in one jal instruction."); // 1M

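    // Encode "jal x0, offset" (J-type): the immediate is scattered as
    // imm[20|10:1|11|19:12] over bits 31:12, rd (x0 here) occupies bits 11:7,
    // and 0b1101111 is the JAL opcode.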
    uint32_t insn = 0;
    address pInsn = (address)&insn;
    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
    Assembler::patch(pInsn, 11, 7, 0); // zero, no link jump
    Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset)
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  int32_t offset = 0;
  a.movptr_with_offset(t0, entry, offset); // lui, addi, slli, addi, slli
  a.jalr(x0, t0, offset); // jalr

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}


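// Explanatory note: the trampoline stub keeps its 64-bit branch target in a
// data word at data_offset; the stub's code loads that word and jumps to it,
// so patching the word is enough to redirect the call.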
address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

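// In the FENCE encoding the predecessor set lives in bits 27:24 and the
// successor set in bits 23:20; each bit enables ordering against device
// input, device output, memory reads, or memory writes.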
uint32_t NativeMembar::get_kind() {
  uint32_t insn = uint_at(0);

  uint32_t predecessor = Assembler::extract(insn, 27, 24);
  uint32_t successor = Assembler::extract(insn, 23, 20);

  return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor);
}

void NativeMembar::set_kind(uint32_t order_kind) {
  uint32_t predecessor = 0;
  uint32_t successor = 0;

  MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor);

  uint32_t insn = uint_at(0);
  address pInsn = (address) &insn;
  Assembler::patch(pInsn, 27, 24, predecessor);
  Assembler::patch(pInsn, 23, 20, successor);

  address membar = addr_at(0);
  *(unsigned int*) membar = insn;
}