/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_NATIVEINST_RISCV_HPP
#define CPU_RISCV_NATIVEINST_RISCV_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovRegMem
// - - NativeJump
// - - NativeGeneralJump
// - - NativeIllegalInstruction
// - - NativeCallTrampolineStub
// - - NativeMembar
// - - NativeFenceI

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeCall;

class NativeInstruction {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  enum {
    instruction_size = 4,
    compressed_instruction_size = 2,
  };
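  // instruction_size is the width in bytes of a base (uncompressed) RV64
  // instruction; compressed_instruction_size is the width of an RVC
  // instruction. The sizes computed in this file are all multiples of the
  // 4-byte base encoding.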

  juint encoding() const {
    return uint_at(0);
  }

  bool is_jal()                             const { return is_jal_at(addr_at(0));         }
  bool is_movptr()                          const { return is_movptr_at(addr_at(0));      }
  bool is_call()                            const { return is_call_at(addr_at(0));        }
  bool is_jump()                            const { return is_jump_at(addr_at(0));        }

  static bool is_jal_at(address instr)        { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1101111; }
  static bool is_jalr_at(address instr)       { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
  static bool is_branch_at(address instr)     { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100011; }
  static bool is_ld_at(address instr)         { assert_cond(instr != NULL); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
  static bool is_load_at(address instr)       { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000011; }
  static bool is_float_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000111; }
  static bool is_auipc_at(address instr)      { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010111; }
  static bool is_jump_at(address instr)       { assert_cond(instr != NULL); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
  static bool is_addi_at(address instr)       { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
  static bool is_addiw_at(address instr)      { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
  static bool is_lui_at(address instr)        { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0110111; }
  static bool is_slli_shift_at(address instr, uint32_t shift) {
    assert_cond(instr != NULL);
    return (extract_opcode(instr) == 0b0010011 && // opcode field
            extract_funct3(instr) == 0b001 &&     // funct3 field, select the type of operation
            Assembler::extract(((unsigned*)instr)[0], 25, 20) == shift);    // shamt field
  }

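  // The extractors below return the standard RISC-V encoding fields:
  //   opcode = inst[6:0], rd = inst[11:7], funct3 = inst[14:12],
  //   rs1 = inst[19:15], rs2 = inst[24:20].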
  static Register extract_rs1(address instr);
  static Register extract_rs2(address instr);
  static Register extract_rd(address instr);
  static uint32_t extract_opcode(address instr);
  static uint32_t extract_funct3(address instr);

  // the instruction sequence of movptr is as below:
  //     lui
  //     addi
  //     slli
  //     addi
  //     slli
  //     addi/jalr/load
  static bool check_movptr_data_dependency(address instr) {
    address lui = instr;
    address addi1 = lui + instruction_size;
    address slli1 = addi1 + instruction_size;
    address addi2 = slli1 + instruction_size;
    address slli2 = addi2 + instruction_size;
    address last_instr = slli2 + instruction_size;
    return extract_rs1(addi1) == extract_rd(lui) &&
           extract_rs1(addi1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(slli2) &&
           extract_rs1(last_instr) == extract_rd(slli2);
  }
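
  // For illustration only, a movptr built on t0 has the shape
  //     lui   t0, ...
  //     addi  t0, t0, ...
  //     slli  t0, t0, shift
  //     addi  t0, t0, ...
  //     slli  t0, t0, shift
  //     addi  t0, t0, ...        // or a jalr/load consuming t0
  // The check above only verifies this register chaining (each rs1 reads the rd
  // produced by the previous instruction); it does not inspect immediates or
  // shift amounts.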

  // the instruction sequence of li64 is as below:
  //     lui
  //     addi
  //     slli
  //     addi
  //     slli
  //     addi
  //     slli
  //     addi
  static bool check_li64_data_dependency(address instr) {
    address lui = instr;
    address addi1 = lui + instruction_size;
    address slli1 = addi1 + instruction_size;
    address addi2 = slli1 + instruction_size;
    address slli2 = addi2 + instruction_size;
    address addi3 = slli2 + instruction_size;
    address slli3 = addi3 + instruction_size;
    address addi4 = slli3 + instruction_size;
    return extract_rs1(addi1) == extract_rd(lui) &&
           extract_rs1(addi1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(slli2) &&
           extract_rs1(addi3) == extract_rd(slli2) &&
           extract_rs1(addi3) == extract_rd(addi3) &&
           extract_rs1(slli3) == extract_rd(addi3) &&
           extract_rs1(slli3) == extract_rd(slli3) &&
           extract_rs1(addi4) == extract_rd(slli3) &&
           extract_rs1(addi4) == extract_rd(addi4);
  }

  // the instruction sequence of li32 is as below:
  //     lui
  //     addiw
  static bool check_li32_data_dependency(address instr) {
    address lui = instr;
    address addiw = lui + instruction_size;

    return extract_rs1(addiw) == extract_rd(lui) &&
           extract_rs1(addiw) == extract_rd(addiw);
  }
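
  // For example (register chosen for illustration), li32 loading 0x12345678 is
  //     lui   t0, 0x12345        // t0 = 0x12345000
  //     addiw t0, t0, 0x678      // t0 = 0x12345678
  // (when the low 12 bits are negative as a signed value, the lui immediate is
  // bumped by one so the addiw still produces the intended constant)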

  // the instruction sequence of pc-relative is as below:
  //     auipc
  //     jalr/addi/load/float_load
  static bool check_pc_relative_data_dependency(address instr) {
    address auipc = instr;
    address last_instr = auipc + instruction_size;

    return extract_rs1(last_instr) == extract_rd(auipc);
  }
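
  // Illustrative shape (register names are examples only):
  //     auipc t0, imm20          // t0 = pc + (imm20 << 12)
  //     ld    t1, imm12(t0)      // or jalr/addi/flw/fld, each reading auipc's rd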

  // the instruction sequence of load_label is as below:
  //     auipc
  //     load
  static bool check_load_pc_relative_data_dependency(address instr) {
    address auipc = instr;
    address load = auipc + instruction_size;

    return extract_rd(load) == extract_rd(auipc) &&
           extract_rs1(load) == extract_rd(load);
  }

  static bool is_movptr_at(address instr);
  static bool is_li32_at(address instr);
  static bool is_li64_at(address instr);
  static bool is_pc_relative_at(address branch);
  static bool is_load_pc_relative_at(address branch);

  static bool is_call_at(address instr) {
    if (is_jal_at(instr) || is_jalr_at(instr)) {
      return true;
    }
    return false;
  }
  static bool is_lwu_to_zr(address instr);

  inline bool is_nop();
  inline bool is_jump_or_nop();
  bool is_safepoint_poll();
  bool is_sigill_zombie_not_entrant();
  bool is_stop();

 protected:
  address addr_at(int offset) const    { return address(this) + offset; }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const      { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const     { return *(address*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i; }
  void set_uint_at(int offset, juint i)       { *(juint*)addr_at(offset) = i; }
  void set_ptr_at (int offset, address  ptr)  { *(address*) addr_at(offset) = ptr; }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o; }

 public:

  inline friend NativeInstruction* nativeInstruction_at(address addr);

  static bool maybe_cpool_ref(address instr) {
    return is_auipc_at(instr);
  }

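  // FENCE uses the MISC-MEM major opcode (0b0001111) with funct3 0b000;
  // funct3 0b001 in the same opcode space is FENCE.I, which this predicate
  // deliberately excludes.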
  bool is_membar() {
    return (uint_at(0) & 0x7f) == 0b1111 && extract_funct3(addr_at(0)) == 0;
  }
};

inline NativeInstruction* nativeInstruction_at(address addr) {
  return (NativeInstruction*)addr;
}

// The natural type of a RISC-V instruction is uint32_t
inline NativeInstruction* nativeInstruction_at(uint32_t *addr) {
  return (NativeInstruction*)addr;
}

inline NativeCall* nativeCall_at(address addr);
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
// DSO calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum RISCV_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    displacement_offset         =    0,
    return_address_offset       =    4
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;

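  // The call site is a single jal, so retargeting it means re-encoding the
  // J-type immediate: the 21-bit (always even) offset is scattered as
  // inst[31] = offset[20], inst[30:21] = offset[10:1], inst[20] = offset[11]
  // and inst[19:12] = offset[19:12], which is what the Assembler::patch calls
  // below reassemble.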
  void set_destination(address dest) {
    assert(is_jal(), "Should be jal instruction!");
    intptr_t offset = (intptr_t)(dest - instruction_address());
    assert((offset & 0x1) == 0, "bad alignment");
    assert(is_imm_in_range(offset, 20, 1), "encoding constraint");
    unsigned int insn = 0b1101111; // jal
    address pInsn = (address)(&insn);
    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
    Assembler::patch(pInsn, 11, 7, ra->encoding()); // rd must be ra (x1) so the return address is saved
    set_int_at(displacement_offset, insn);
  }

  void verify_alignment() {} // do nothing on riscv
  void verify();
  void print();

  // Creation
  inline friend NativeCall* nativeCall_at(address addr);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);

  // Similar to replace_mt_safe, but just changes the destination.  The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times.  If the call is an immediate jal
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();
};

inline NativeCall* nativeCall_at(address addr) {
  assert_cond(addr != NULL);
  NativeCall* call = (NativeCall*)(addr - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  assert_cond(return_address != NULL);
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
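
// Illustrative use (variable names are examples only): given the return
// address recorded for an active call, the call site and its current target
// can be recovered with
//   NativeCall* call   = nativeCall_before(return_pc);
//   address     target = call->destination();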

// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum RISCV_specific_constants {
    movptr_instruction_size             =    6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, addi.  See movptr().
    movptr_with_offset_instruction_size =    5 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli. See movptr_with_offset().
    load_pc_relative_instruction_size   =    2 * NativeInstruction::instruction_size, // auipc, ld
    instruction_offset                  =    0,
    displacement_offset                 =    0
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  {
    // If the instruction at 5 * instruction_size is addi,
    // it means a lui + addi + slli + addi + slli + addi instruction sequence,
    // and the next instruction address should be addr_at(6 * instruction_size).
    // However, when the instruction at 5 * instruction_size isn't addi,
    // the next instruction address should be addr_at(5 * instruction_size).
    if (nativeInstruction_at(instruction_address())->is_movptr()) {
      if (is_addi_at(addr_at(movptr_with_offset_instruction_size))) {
        // Assume: lui, addi, slli, addi, slli, addi
        return addr_at(movptr_instruction_size);
      } else {
        // Assume: lui, addi, slli, addi, slli
        return addr_at(movptr_with_offset_instruction_size);
      }
    } else if (is_load_pc_relative_at(instruction_address())) {
      // Assume: auipc, ld
      return addr_at(load_pc_relative_instruction_size);
    }
    guarantee(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  intptr_t data() const;
  void  set_data(intptr_t x);

  void flush() {
    if (!maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), movptr_instruction_size);
    }
  }

  void  verify();
  void  print();

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address addr);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address addr);
};

inline NativeMovConstReg* nativeMovConstReg_at(address addr) {
  assert_cond(addr != NULL);
  NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address addr) {
  assert_cond(addr != NULL);
  NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
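
// A sketch of typical use (names are illustrative): code that needs to repoint
// an embedded 64-bit constant does something like
//   NativeMovConstReg* mov = nativeMovConstReg_at(instr_addr);
//   mov->set_data((intptr_t)new_value);
//   mov->flush();   // keep the instruction stream coherent when needed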

// RISCV should not use C1 runtime patching, so just leave NativeMovRegMem Unimplemented.
class NativeMovRegMem: public NativeInstruction {
 public:
  int instruction_start() const {
    Unimplemented();
    return 0;
  }

  address instruction_address() const {
    Unimplemented();
    return NULL;
  }

  int num_bytes_to_end_of_patch() const {
    Unimplemented();
    return 0;
  }

  int offset() const;

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) { Unimplemented(); }

  void verify();
  void print();

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address addr);
};

inline NativeMovRegMem* nativeMovRegMem_at (address addr) {
  Unimplemented();
  return NULL;
}

class NativeJump: public NativeInstruction {
 public:
  enum RISCV_specific_constants {
    instruction_size            =    NativeInstruction::instruction_size,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    NativeInstruction::instruction_size
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address addr);

  void verify();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address addr) {
  NativeJump* jump = (NativeJump*)(addr - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

class NativeGeneralJump: public NativeJump {
public:
  enum RISCV_specific_constants {
    instruction_size            =    6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, jalr
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    6 * NativeInstruction::instruction_size  // lui, addi, slli, addi, slli, jalr
  };

  address jump_destination() const;

  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
  assert_cond(addr != NULL);
  NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
  debug_only(jump->verify();)
  return jump;
}

class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert an illegal opcode at the specified address
  static void insert(address code_pos);
};

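// The canonical RISC-V nop is "addi x0, x0, 0", which encodes as 0x00000013.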
inline bool NativeInstruction::is_nop()         {
  uint32_t insn = *(uint32_t*)addr_at(0);
  return insn == 0x13;
}

inline bool NativeInstruction::is_jump_or_nop() {
  return is_nop() || is_jump();
}

// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum RISCV_specific_constants {
    // Refer to function emit_trampoline_stub.
    instruction_size = 3 * NativeInstruction::instruction_size + wordSize, // auipc + ld + jr + target address
    data_offset      = 3 * NativeInstruction::instruction_size,            // auipc + ld + jr
  };
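
  // Rough layout of the stub these constants describe (offsets in bytes):
  //   0x00  auipc t0, 0         // t0 = address of the stub
  //   0x04  ld    t0, 12(t0)    // load the target kept in the data word below
  //   0x08  jalr  zero, 0(t0)   // i.e. jr t0
  //   0x0c  <8-byte destination address>   <-- data_offset points here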

  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};

inline bool is_NativeCallTrampolineStub_at(address addr) {
  // Ensure that the stub is exactly
  //      ld   t0, L        (materialized as auipc + ld)
  //      jr   t0
  // L:
  //
  // by checking the instructions, registers and immediate:
  // 1). check the instructions: auipc + ld + jalr
  // 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 and jr[19:15] == t0
  // 3). check if the offset in ld[31:20] equals the data_offset
  assert_cond(addr != NULL);
  const int instr_size = NativeInstruction::instruction_size;
  if (NativeInstruction::is_auipc_at(addr) &&
      NativeInstruction::is_ld_at(addr + instr_size) &&
      NativeInstruction::is_jalr_at(addr + 2 * instr_size) &&
      (NativeInstruction::extract_rd(addr)                    == x5) &&
      (NativeInstruction::extract_rd(addr + instr_size)       == x5) &&
      (NativeInstruction::extract_rs1(addr + instr_size)      == x5) &&
      (NativeInstruction::extract_rs1(addr + 2 * instr_size)  == x5) &&
      (Assembler::extract(((unsigned*)addr)[1], 31, 20) == NativeCallTrampolineStub::data_offset)) {
    return true;
  }
  return false;
}

inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
  assert_cond(addr != NULL);
  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
  return (NativeCallTrampolineStub*)addr;
}

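// A fence instruction whose "kind" is the ordering constraint carried in the
// fence's predecessor/successor sets (pred in inst[27:24], succ in inst[23:20]).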
class NativeMembar : public NativeInstruction {
public:
  uint32_t get_kind();
  void set_kind(uint32_t order_kind);
};

inline NativeMembar *NativeMembar_at(address addr) {
  assert_cond(addr != NULL);
  assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
  return (NativeMembar*)addr;
}

class NativeFenceI : public NativeInstruction {
public:
  static inline int instruction_size() {
    // 2 for fence.i + fence
    return (UseConservativeFence ? 2 : 1) * NativeInstruction::instruction_size;
  }
};

#endif // CPU_RISCV_NATIVEINST_RISCV_HPP