1 /*
  2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  4  * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
  5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  6  *
  7  * This code is free software; you can redistribute it and/or modify it
  8  * under the terms of the GNU General Public License version 2 only, as
  9  * published by the Free Software Foundation.
 10  *
 11  * This code is distributed in the hope that it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 14  * version 2 for more details (a copy is included in the LICENSE file that
 15  * accompanied this code).
 16  *
 17  * You should have received a copy of the GNU General Public License version
 18  * 2 along with this work; if not, write to the Free Software Foundation,
 19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 20  *
 21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 22  * or visit www.oracle.com if you need additional information or have any
 23  * questions.
 24  *
 25  */
 26 
 27 #ifndef CPU_RISCV_NATIVEINST_RISCV_HPP
 28 #define CPU_RISCV_NATIVEINST_RISCV_HPP
 29 
 30 #include "asm/assembler.hpp"
 31 #include "runtime/icache.hpp"
 32 #include "runtime/os.hpp"
 33 
 34 // We have interfaces for the following instructions:
 35 // - NativeInstruction
 36 // - - NativeCall
 37 // - - NativeMovConstReg
 38 // - - NativeMovRegMem
 39 // - - NativeJump
 40 // - - NativeGeneralJump
 41 // - - NativeIllegalInstruction
 42 // - - NativeCallTrampolineStub
 43 // - - NativeMembar
 44 // - - NativeFenceI
 45 
 46 // The base class for different kinds of native instruction abstractions.
 47 // Provides the primitive operations to manipulate code relative to this.
 48 
 49 class NativeCall;
 50 
class NativeInstruction {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  // All instructions recognized here are fixed-width 4-byte RV64 encodings;
  // compressed (RVC) instructions are not considered by this class.
  enum {
    instruction_size = 4
  };

  // Raw 32-bit encoding of the instruction at this address.
  juint encoding() const {
    return uint_at(0);
  }

  bool is_jal()                             const { return is_jal_at(addr_at(0));         }
  bool is_movptr()                          const { return is_movptr_at(addr_at(0));      }
  bool is_call()                            const { return is_call_at(addr_at(0));        }
  bool is_jump()                            const { return is_jump_at(addr_at(0));        }

  // Static instruction testers. Each one decodes fields of the 32-bit word at
  // 'instr': bits 6..0 hold the opcode, bits 14..12 hold funct3.
  static bool is_jal_at(address instr)        { assert_cond(instr != NULL); return Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b1101111; }
  static bool is_jalr_at(address instr)       { assert_cond(instr != NULL); return (Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b1100111 &&
                                                Assembler::extract(((unsigned*)instr)[0], 14, 12) == 0b000); }
  static bool is_branch_at(address instr)     { assert_cond(instr != NULL); return Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b1100011; }
  static bool is_ld_at(address instr)         { assert_cond(instr != NULL); return (Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0000011 &&
                                                Assembler::extract(((unsigned*)instr)[0], 14, 12) == 0b011); }
  static bool is_load_at(address instr)       { assert_cond(instr != NULL); return Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0000011; }
  static bool is_float_load_at(address instr) { assert_cond(instr != NULL); return Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0000111; }
  static bool is_auipc_at(address instr)      { assert_cond(instr != NULL); return Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0010111; }
  static bool is_jump_at(address instr)       { assert_cond(instr != NULL); return (is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr)); }
  static bool is_addi_at(address instr)       { assert_cond(instr != NULL); return (Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0010011 &&
                                                Assembler::extract(((unsigned*)instr)[0], 14, 12) == 0b000); }
  static bool is_addiw_at(address instr)      { assert_cond(instr != NULL); return (Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0011011 &&
                                                Assembler::extract(((unsigned*)instr)[0], 14, 12) == 0b000); }
  static bool is_lui_at(address instr)        { assert_cond(instr != NULL); return Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0110111; }
  // True iff 'instr' is a slli whose shift amount equals 'shift'.
  static bool is_slli_shift_at(address instr, uint32_t shift) {
    assert_cond(instr != NULL);
    return (Assembler::extract(((unsigned*)instr)[0], 6, 0) == 0b0010011 && // opcode field
            Assembler::extract(((unsigned*)instr)[0], 14, 12) == 0b001 &&   // funct3 field, select the type of operation
            Assembler::extract(((unsigned*)instr)[0], 25, 20) == shift);    // shamt field
  }

  // return true if the (index1~index2) field of instr1 is equal to (index3~index4) field of instr2, otherwise false
  static bool compare_instr_field(address instr1, int index1, int index2, address instr2, int index3, int index4) {
    assert_cond(instr1 != NULL && instr2 != NULL);
    return Assembler::extract(((unsigned*)instr1)[0], index1, index2) == Assembler::extract(((unsigned*)instr2)[0], index3, index4);
  }

  // Verify that each instruction in the sequence consumes the register the
  // previous one produced (rs1 of instr[i+1] == rd of instr[i]) and that the
  // arithmetic steps write back into their own source register.
  //
  // the instruction sequence of movptr is as below:
  //     lui
  //     addi
  //     slli
  //     addi
  //     slli
  //     addi/jalr/load
  static bool check_movptr_data_dependency(address instr) {
    return compare_instr_field(instr + 4, 19, 15, instr, 11, 7)       &&     // check the rs1 field of addi and the rd field of lui
           compare_instr_field(instr + 4, 19, 15, instr + 4, 11, 7)   &&     // check the rs1 field and the rd field of addi
           compare_instr_field(instr + 8, 19, 15, instr + 4, 11, 7)   &&     // check the rs1 field of slli and the rd field of addi
           compare_instr_field(instr + 8, 19, 15, instr + 8, 11, 7)   &&     // check the rs1 field and the rd field of slli
           compare_instr_field(instr + 12, 19, 15, instr + 8, 11, 7)  &&     // check the rs1 field of addi and the rd field of slli
           compare_instr_field(instr + 12, 19, 15, instr + 12, 11, 7) &&     // check the rs1 field and the rd field of addi
           compare_instr_field(instr + 16, 19, 15, instr + 12, 11, 7) &&     // check the rs1 field of slli and the rd field of addi
           compare_instr_field(instr + 16, 19, 15, instr + 16, 11, 7) &&     // check the rs1 field and the rd field of slli
           compare_instr_field(instr + 20, 19, 15, instr + 16, 11, 7);       // check the rs1 field of addi/jalr/load and the rd field of slli
  }

  // the instruction sequence of li64 is as below:
  //     lui
  //     addi
  //     slli
  //     addi
  //     slli
  //     addi
  //     slli
  //     addi
  static bool check_li64_data_dependency(address instr) {
    return compare_instr_field(instr + 4, 19, 15, instr, 11, 7)       &&  // check the rs1 field of addi and the rd field of lui
           compare_instr_field(instr + 4, 19, 15, instr + 4, 11, 7)   &&  // check the rs1 field and the rd field of addi
           compare_instr_field(instr + 8, 19, 15, instr + 4, 11, 7)   &&  // check the rs1 field of slli and the rd field of addi
           compare_instr_field(instr + 8, 19, 15, instr + 8, 11, 7)   &&  // check the rs1 field and the rd field of slli
           compare_instr_field(instr + 12, 19, 15, instr + 8, 11, 7)  &&  // check the rs1 field of addi and the rd field of slli
           compare_instr_field(instr + 12, 19, 15, instr + 12, 11, 7) &&  // check the rs1 field and the rd field of addi
           compare_instr_field(instr + 16, 19, 15, instr + 12, 11, 7) &&  // check the rs1 field of slli and the rd field of addi
           compare_instr_field(instr + 16, 19, 15, instr + 16, 11, 7) &&  // check the rs1 field and the rd field of slli
           compare_instr_field(instr + 20, 19, 15, instr + 16, 11, 7) &&  // check the rs1 field of addi and the rd field of slli
           compare_instr_field(instr + 20, 19, 15, instr + 20, 11, 7) &&  // check the rs1 field and the rd field of addi
           compare_instr_field(instr + 24, 19, 15, instr + 20, 11, 7) &&  // check the rs1 field of slli and the rd field of addi
           compare_instr_field(instr + 24, 19, 15, instr + 24, 11, 7) &&  // check the rs1 field and the rd field of slli
           compare_instr_field(instr + 28, 19, 15, instr + 24, 11, 7) &&  // check the rs1 field of addi and the rd field of slli
           compare_instr_field(instr + 28, 19, 15, instr + 28, 11, 7);    // check the rs1 field and the rd field of addi
  }

  // the instruction sequence of li32 is as below:
  //     lui
  //     addiw
  static bool check_li32_data_dependency(address instr) {
    return compare_instr_field(instr + 4, 19, 15, instr, 11, 7) &&     // check the rs1 field of addiw and the rd field of lui
           compare_instr_field(instr + 4, 19, 15, instr + 4, 11, 7);   // check the rs1 field and the rd field of addiw
  }

  // the instruction sequence of pc-relative is as below:
  //     auipc
  //     jalr/addi/load/float_load
  static bool check_pc_relative_data_dependency(address instr) {
    return compare_instr_field(instr, 11, 7, instr + 4, 19, 15);          // check the rd field of auipc and the rs1 field of jalr/addi/load/float_load
  }

  // the instruction sequence of load_label is as below:
  //     auipc
  //     load
  static bool check_load_pc_relative_data_dependency(address instr) {
    return compare_instr_field(instr, 11, 7, instr + 4, 11, 7) &&      // check the rd field of auipc and the rd field of load
           compare_instr_field(instr + 4, 19, 15, instr + 4, 11, 7);   // check the rs1 field of load and the rd field of load
  }

  // Multi-instruction sequence testers; implemented in nativeInst_riscv.cpp.
  static bool is_movptr_at(address instr);
  static bool is_li32_at(address instr);
  static bool is_li64_at(address instr);
  static bool is_pc_relative_at(address branch);
  static bool is_load_pc_relative_at(address branch);

  // A call is either a jal (direct) or a jalr (indirect).
  static bool is_call_at(address instr) {
    if (is_jal_at(instr) || is_jalr_at(instr)) {
      return true;
    }
    return false;
  }
  static bool is_lwu_to_zr(address instr);

  inline bool is_nop();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump_or_nop();
  inline bool is_cond_jump();
  bool is_safepoint_poll();
  bool is_sigill_zombie_not_entrant();
  bool is_stop();

 protected:
  // Raw accessors relative to the instruction's own address.
  address addr_at(int offset) const    { return address(this) + offset; }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const      { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const     { return *(address*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i; }
  // NOTE(review): parameter is declared jint but stored through a juint* —
  // confirm the signed parameter type is intentional.
  void set_uint_at(int offset, jint  i)       { *(juint*)addr_at(offset) = i; }
  void set_ptr_at (int offset, address  ptr)  { *(address*) addr_at(offset) = ptr; }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o; }

 public:

  inline friend NativeInstruction* nativeInstruction_at(address addr);

  // An auipc here may be the first half of a constant-pool reference.
  static bool maybe_cpool_ref(address instr) {
    return is_auipc_at(instr);
  }

  bool is_membar() {
    unsigned int insn = uint_at(0);
    // MISC-MEM opcode (0b0001111) with funct3 == 0b000 selects "fence";
    // funct3 == 0b001 would be fence.i, which is excluded here.
    return (insn & 0x7f) == 0b1111 && Assembler::extract(insn, 14, 12) == 0;
  }
};
216 
217 inline NativeInstruction* nativeInstruction_at(address addr) {
218   return (NativeInstruction*)addr;
219 }
220 
221 // The natural type of an RISCV64 instruction is uint32_t
222 inline NativeInstruction* nativeInstruction_at(uint32_t *addr) {
223   return (NativeInstruction*)addr;
224 }
225 
226 inline NativeCall* nativeCall_at(address addr);
227 // The NativeCall is an abstraction for accessing/manipulating native
228 // call instructions (used to manipulate inline caches, primitive &
229 // DSO calls, etc.).
230 
231 class NativeCall: public NativeInstruction {
232  public:
233   enum RISCV64_specific_constants {
234     instruction_size            =    4,
235     instruction_offset          =    0,
236     displacement_offset         =    0,
237     return_address_offset       =    4
238   };
239 
240   address instruction_address() const       { return addr_at(instruction_offset); }
241   address next_instruction_address() const  { return addr_at(return_address_offset); }
242   address return_address() const            { return addr_at(return_address_offset); }
243   address destination() const;
244 
245   void set_destination(address dest)      {
246     if (is_jal()) {
247       intptr_t offset = (intptr_t)(dest - instruction_address());
248       assert((offset & 0x1) == 0, "should be aligned");
249       assert(is_imm_in_range(offset, 20, 1), "set_destination, offset is too large to be patched in one jal insrusction\n");
250       unsigned int insn = 0b1101111; // jal
251       address pInsn = (address)(&insn);
252       Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
253       Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
254       Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
255       Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
256       Assembler::patch(pInsn, 11, 7, lr->encoding()); // Rd must be x1, need lr
257       set_int_at(displacement_offset, insn);
258       return;
259     }
260     ShouldNotReachHere();
261   }
262 
263   void  verify_alignment()                       { ; }
264   void  verify();
265   void  print();
266 
267   // Creation
268   inline friend NativeCall* nativeCall_at(address addr);
269   inline friend NativeCall* nativeCall_before(address return_address);
270 
271   static bool is_call_before(address return_address) {
272     return is_call_at(return_address - NativeCall::return_address_offset);
273   }
274 
275   // MT-safe patching of a call instruction.
276   static void insert(address code_pos, address entry);
277 
278   static void replace_mt_safe(address instr_addr, address code_buffer);
279 
280   // Similar to replace_mt_safe, but just changes the destination.  The
281   // important thing is that free-running threads are able to execute
282   // this call instruction at all times.  If the call is an immediate BL
283   // instruction we can simply rely on atomicity of 32-bit writes to
284   // make sure other threads will see no intermediate states.
285 
286   // We cannot rely on locks here, since the free-running threads must run at
287   // full speed.
288   //
289   // Used in the runtime linkage of calls; see class CompiledIC.
290   // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
291 
292   // The parameter assert_lock disables the assertion during code generation.
293   void set_destination_mt_safe(address dest, bool assert_lock = true);
294 
295   address get_trampoline();
296 };
297 
298 inline NativeCall* nativeCall_at(address addr) {
299   assert_cond(addr != NULL);
300   NativeCall* call = (NativeCall*)(addr - NativeCall::instruction_offset);
301 #ifdef ASSERT
302   call->verify();
303 #endif
304   return call;
305 }
306 
307 inline NativeCall* nativeCall_before(address return_address) {
308   assert_cond(return_address != NULL);
309   NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
310 #ifdef ASSERT
311   call->verify();
312 #endif
313   return call;
314 }
315 
// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum RISCV64_specific_constants {
    movptr_instruction_size             =    6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, addi.  See movptr().
    movptr_with_offset_instruction_size =    5 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli. See movptr_with_offset().
    load_pc_relative_instruction_size   =    2 * NativeInstruction::instruction_size, // auipc, ld
    instruction_offset                  =    0,
    displacement_offset                 =    0
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  // Length of this constant-materialization sequence depends on which form
  // was emitted; probe the instructions to find out.
  address next_instruction_address() const  {
    // if the instruction at 5 * instruction_size is addi,
    // it means a lui + addi + slli + addi + slli + addi instruction sequence,
    // and the next instruction address should be addr_at(6 * instruction_size).
    // However, when the instruction at 5 * instruction_size isn't addi,
    // the next instruction address should be addr_at(5 * instruction_size)
    if (nativeInstruction_at(instruction_address())->is_movptr()) {
      if (is_addi_at(addr_at(movptr_with_offset_instruction_size))) {
        // Assume: lui, addi, slli, addi, slli, addi
        return addr_at(movptr_instruction_size);
      } else {
        // Assume: lui, addi, slli, addi, slli
        return addr_at(movptr_with_offset_instruction_size);
      }
    } else if (is_load_pc_relative_at(instruction_address())) {
      // Assume: auipc, ld
      return addr_at(load_pc_relative_instruction_size);
    }
    guarantee(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  // Read/patch the constant materialized by this sequence; see .cpp file.
  intptr_t data() const;
  void  set_data(intptr_t x);

  void flush() {
    // Sequences that start with auipc may be constant-pool references
    // (see maybe_cpool_ref) and are not flushed here; presumably their
    // patching happens in the pool data, not the instruction stream —
    // confirm against set_data().
    if (!maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), movptr_instruction_size);
    }
  }

  void  verify();
  void  print();

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address addr);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address addr);
};
367 
368 inline NativeMovConstReg* nativeMovConstReg_at(address addr) {
369   assert_cond(addr != NULL);
370   NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset);
371 #ifdef ASSERT
372   test->verify();
373 #endif
374   return test;
375 }
376 
377 inline NativeMovConstReg* nativeMovConstReg_before(address addr) {
378   assert_cond(addr != NULL);
379   NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
380 #ifdef ASSERT
381   test->verify();
382 #endif
383   return test;
384 }
385 
// RISCV64 should not use C1 runtime patching, so just leave NativeMovRegMem Unimplemented.
// All members trap via Unimplemented() if ever reached at runtime.
class NativeMovRegMem: public NativeInstruction {
 public:
  int instruction_start() const {
    Unimplemented();
    return 0;
  }

  address instruction_address() const {
    Unimplemented();
    return NULL;
  }

  int num_bytes_to_end_of_patch() const {
    Unimplemented();
    return 0;
  }

  int offset() const;

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) { Unimplemented(); }

  void verify();
  void print();

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address addr);
};
416 
// Unsupported on RISCV64 (no C1 runtime patching); traps if ever called.
inline NativeMovRegMem* nativeMovRegMem_at (address addr) {
  Unimplemented();
  return NULL;
}
421 
// Abstraction for a single 4-byte native jump instruction.
class NativeJump: public NativeInstruction {
 public:
  enum RISCV64_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    4
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  // Resolve/patch the jump target; implemented in nativeInst_riscv.cpp.
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
450 
451 inline NativeJump* nativeJump_at(address addr) {
452   NativeJump* jump = (NativeJump*)(addr - NativeJump::instruction_offset);
453 #ifdef ASSERT
454   jump->verify();
455 #endif
456   return jump;
457 }
458 
// A long-range jump materialized as a full movptr sequence followed by jalr.
class NativeGeneralJump: public NativeJump {
public:
  enum RISCV64_specific_constants {
    instruction_size            =    6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, jalr
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    6 * NativeInstruction::instruction_size  // lui, addi, slli, addi, slli, jalr
  };

  address jump_destination() const;

  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
};
473 
474 inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
475   assert_cond(addr != NULL);
476   NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
477   debug_only(jump->verify();)
478   return jump;
479 }
480 
class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert illegal opcode at the specified address.
  static void insert(address code_pos);
};
486 
487 inline bool NativeInstruction::is_nop()         {
488   uint32_t insn = *(uint32_t*)addr_at(0);
489   return insn == 0x13;
490 }
491 
492 inline bool NativeInstruction::is_jump_or_nop() {
493   return is_nop() || is_jump();
494 }
495 
// Call trampoline stubs.
// Layout: auipc + ld + jr followed by a word-sized slot holding the 64-bit
// target address (see emit_trampoline_stub).
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum RISCV64_specific_constants {
    // Refer to function emit_trampoline_stub.
    instruction_size = 3 * NativeInstruction::instruction_size + wordSize, // auipc + ld + jr + target address
    data_offset      = 3 * NativeInstruction::instruction_size,            // auipc + ld + jr
  };

  // Read/write the target address stored at data_offset; implemented in the
  // .cpp file.
  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};
510 
511 inline bool is_NativeCallTrampolineStub_at(address addr) {
512   // Ensure that the stub is exactly
513   //      ld   t0, L--->auipc + ld
514   //      jr   t0
515   // L:
516 
517   // judge inst + register + imm
518   // 1). check the instructions: auipc + ld + jalr
519   // 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 && jr[19:15] == t0
520   // 3). check if the offset in ld[31:20] equals the data_offset
521   assert_cond(addr != NULL);
522   if (NativeInstruction::is_auipc_at(addr) && NativeInstruction::is_ld_at(addr + 4) && NativeInstruction::is_jalr_at(addr + 8) &&
523       ((Register)(intptr_t)Assembler::extract(((unsigned*)addr)[0], 11, 7)     == x5) &&
524       ((Register)(intptr_t)Assembler::extract(((unsigned*)addr)[1], 11, 7)     == x5) &&
525       ((Register)(intptr_t)Assembler::extract(((unsigned*)addr)[1], 19, 15)    == x5) &&
526       ((Register)(intptr_t)Assembler::extract(((unsigned*)addr)[2], 19, 15)    == x5) &&
527       (Assembler::extract(((unsigned*)addr)[1], 31, 20) == NativeCallTrampolineStub::data_offset)) {
528     return true;
529   }
530   return false;
531 }
532 
533 inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
534   assert_cond(addr != NULL);
535   assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
536   return (NativeCallTrampolineStub*)addr;
537 }
538 
// Abstraction over a fence instruction used as a memory barrier.
class NativeMembar : public NativeInstruction {
public:
  // Get/set the ordering-constraint encoding of the fence; implemented in
  // nativeInst_riscv.cpp.
  uint32_t get_kind();
  void set_kind(uint32_t order_kind);
};
544 
545 inline NativeMembar *NativeMembar_at(address addr) {
546   assert_cond(addr != NULL);
547   assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
548   return (NativeMembar*)addr;
549 }
550 
551 class NativeFenceI : public NativeInstruction {
552 public:
553   static inline int instruction_size() {
554     // 2 for fence.i + fence
555     return (UseConservativeFence ? 2 : 1) * NativeInstruction::instruction_size;
556   }
557 };
558 
559 #endif // CPU_RISCV_NATIVEINST_RISCV_HPP