/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

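// Helpers that extract fields from a 32-bit RISC-V instruction word. The bit
// ranges follow the base instruction formats of the RISC-V spec: opcode in
// bits [6:0], rd in [11:7], funct3 in [14:12], rs1 in [19:15], rs2 in [24:20].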
Register NativeInstruction::extract_rs1(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 19, 15));
}

Register NativeInstruction::extract_rs2(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 24, 20));
}

Register NativeInstruction::extract_rd(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 11, 7));
}

uint32_t NativeInstruction::extract_opcode(address instr) {
  assert_cond(instr != NULL);
  return Assembler::extract(((unsigned*)instr)[0], 6, 0);
}

uint32_t NativeInstruction::extract_funct3(address instr) {
  assert_cond(instr != NULL);
  return Assembler::extract(((unsigned*)instr)[0], 14, 12);
}

bool NativeInstruction::is_pc_relative_at(address instr) {
  // auipc + jalr
  // auipc + addi
  // auipc + load
  // auipc + float_load
  return (is_auipc_at(instr)) &&
         (is_addi_at(instr + instruction_size) ||
          is_jalr_at(instr + instruction_size) ||
          is_load_at(instr + instruction_size) ||
          is_float_load_at(instr + instruction_size)) &&
         check_pc_relative_data_dependency(instr);
}

// i.e. ld(Rd, Label)
bool NativeInstruction::is_load_pc_relative_at(address instr) {
  return is_auipc_at(instr) && // auipc
         is_ld_at(instr + instruction_size) && // ld
         check_load_pc_relative_data_dependency(instr);
}

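// A movptr, as emitted by MacroAssembler::movptr_with_offset, materializes a
// 48-bit address in six instructions. A rough sketch of the split (see
// macroAssembler_riscv.cpp for the authoritative encoding):
//
//   lui   Rd, ...             // lui + addi load bits [47:17]
//   addi  Rd, Rd, ...
//   slli  Rd, Rd, 11
//   addi  Rd, Rd, ...         // bits [16:6]
//   slli  Rd, Rd, 6
//   addi/jalr/load            // the final instruction supplies bits [5:0]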
bool NativeInstruction::is_movptr_at(address instr) {
  return is_lui_at(instr) && // lui
         is_addi_at(instr + instruction_size) && // addi
         is_slli_shift_at(instr + instruction_size * 2, 11) && // slli Rd, Rs, 11
         is_addi_at(instr + instruction_size * 3) && // addi
         is_slli_shift_at(instr + instruction_size * 4, 6) && // slli Rd, Rs, 6
         (is_addi_at(instr + instruction_size * 5) ||
          is_jalr_at(instr + instruction_size * 5) ||
          is_load_at(instr + instruction_size * 5)) && // addi/jalr/load
         check_movptr_data_dependency(instr);
}

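// li32 loads a 32-bit immediate in two instructions: lui supplies the upper
// 20 bits and addiw adds the sign-extended lower 12.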
bool NativeInstruction::is_li32_at(address instr) {
  return is_lui_at(instr) && // lui
         is_addiw_at(instr + instruction_size) && // addiw
         check_li32_data_dependency(instr);
}

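// li64 builds a full 64-bit immediate: lui + addi load the upper 32 bits,
// then each slli/addi pair shifts and appends another chunk of low bits
// (12 + 12 + 8), for 32 + 12 + 12 + 8 = 64 bits in total.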
bool NativeInstruction::is_li64_at(address instr) {
  return is_lui_at(instr) && // lui
         is_addi_at(instr + instruction_size) && // addi
         is_slli_shift_at(instr + instruction_size * 2, 12) &&  // slli Rd, Rs, 12
         is_addi_at(instr + instruction_size * 3) && // addi
         is_slli_shift_at(instr + instruction_size * 4, 12) &&  // slli Rd, Rs, 12
         is_addi_at(instr + instruction_size * 5) && // addi
         is_slli_shift_at(instr + instruction_size * 6, 8) &&   // slli Rd, Rs, 8
         is_addi_at(instr + instruction_size * 7) && // addi
         check_li64_data_dependency(instr);
}

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

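// The call instruction is a jal, whose +-1 MiB reach may be too short for the
// callee. In that case the jal targets a trampoline stub in this nmethod's
// stub area, and the real destination is the pointer embedded in that stub.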
address NativeCall::destination() const {
  address addr = (address)this;
  assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal.");
  address destination = MacroAssembler::target_addr_for_insn(instruction_address());

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get an assert if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert(!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert(trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address jal_destination = MacroAssembler::pd_call_destination(call_addr);
  if (code != NULL && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
    return jal_destination;
  }

  if (code != NULL && code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (!(nativeInstruction_at(instruction_address())->is_movptr() ||
        is_auipc_at(instruction_address()))) {
    fatal("should be MOVPTR or AUIPC");
  }
}

intptr_t NativeMovConstReg::data() const {
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

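// Two cases, mirroring data() above: if the instruction is a constant-pool
// load (auipc + ld), patch the pool slot it points at; otherwise patch the
// value directly into the movptr instruction sequence. Either way, any
// oop/metadata relocation covering this instruction is updated as well so
// that the GC sees the new reference.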
void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction_size(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), movptr_instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::offset() const {
  Unimplemented();
  return 0;
}

void NativeMovRegMem::set_offset(int x) { Unimplemented(); }

void NativeMovRegMem::verify() {
  Unimplemented();
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { }

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These checks verify those conditions.

  // Must be 4 bytes aligned
  MacroAssembler::assert_alignment(verified_entry);
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use the sequence movptr_with_offset(r, 0),
  // jalr(r, 0), i.e. a jump to 0, when we need to leave space for a wide
  // immediate load.

  // Return -1 if jump to self or to 0.
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use jump to 0 when first generating
  // a general jump.

  // Return -1 if jump to self or to 0.
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

//-------------------------------------------------------------------

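// A safepoint poll is a load from the polling word into the zero register:
// harmless while the poll is disarmed, faulting once it is armed. It is
// recognized purely by shape: opcode LOAD (0b0000011), funct3 LWU (0b110),
// and rd == x0.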
bool NativeInstruction::is_safepoint_poll() {
  return is_lwu_to_zr(address(this));
}

bool NativeInstruction::is_lwu_to_zr(address instr) {
  assert_cond(instr != NULL);
  return (extract_opcode(instr) == 0b0000011 &&
          extract_funct3(instr) == 0b110 &&
          extract_rd(instr) == zr);         // zr
}

// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  // jvmci
  return uint_at(0) == 0xffffffff;
}

void NativeIllegalInstruction::insert(address code_pos) {
  assert_cond(code_pos != NULL);
  *(juint*)code_pos = 0xffffffff; // all bits ones is permanently reserved as an illegal instruction
}

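// The time CSR (0xC01) is read-only, so a csrrw that writes it always raises
// an illegal-instruction exception; the fixed encoding 0xc0101073 therefore
// makes stop sites easy to recognize.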
bool NativeInstruction::is_stop() {
  return uint_at(0) == 0xc0101073; // an illegal instruction, 'csrrw x0, time, x0'
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
         nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "riscv cannot replace non-jump with jump");

  check_verified_entry_alignment(entry, verified_entry);

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t offset = dest - verified_entry;
    guarantee(is_imm_in_range(offset, 20, 1), "offset is too large to be patched in one jal instruction"); // 1M

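    // Hand-assemble "jal x0, offset". The J-type immediate is scattered
    // across bits [31:12] of the instruction word as imm[20|10:1|11|19:12];
    // bit 0 of the offset is implicitly zero.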
    uint32_t insn = 0;
    address pInsn = (address)&insn;
    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
    Assembler::patch(pInsn, 11, 7, 0); // zero, no link jump
    Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset)
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

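// Lays down the six-instruction sequence "movptr t0, entry; jalr x0, t0,
// offset" at code_pos. Materializing the target in t0 lets the jump reach
// an arbitrary address, at the cost of clobbering t0.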
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  int32_t offset = 0;
  a.movptr_with_offset(t0, entry, offset); // lui, addi, slli, addi, slli
  a.jalr(x0, t0, offset); // jalr

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

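// A trampoline stub is a short pc-relative load of an 8-byte data word into a
// scratch register followed by an indirect jump through it; the data word,
// at data_offset from the stub start, holds the real call destination.
// Retargeting the call therefore only requires rewriting that data word.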
address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

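// The RISC-V fence instruction encodes its predecessor set in bits [27:24]
// and its successor set in bits [23:20], each a 4-bit {i, o, r, w} mask.
// get_kind/set_kind translate between those raw fields and HotSpot's membar
// mask via MacroAssembler::pred_succ_to_membar_mask and
// membar_mask_to_pred_succ.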
uint32_t NativeMembar::get_kind() {
  uint32_t insn = uint_at(0);

  uint32_t predecessor = Assembler::extract(insn, 27, 24);
  uint32_t successor = Assembler::extract(insn, 23, 20);

  return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor);
}

void NativeMembar::set_kind(uint32_t order_kind) {
  uint32_t predecessor = 0;
  uint32_t successor = 0;

  MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor);

  uint32_t insn = uint_at(0);
  address pInsn = (address) &insn;
  Assembler::patch(pInsn, 27, 24, predecessor);
  Assembler::patch(pInsn, 23, 20, successor);

  address membar = addr_at(0);
  *(unsigned int*) membar = insn;
}