1 /*
  2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_NATIVEINST_X86_HPP
 26 #define CPU_X86_NATIVEINST_X86_HPP
 27 
 28 #include "asm/assembler.hpp"
 29 #include "runtime/icache.hpp"
 30 #include "runtime/safepointMechanism.hpp"
 31 
 32 // We have interfaces for the following instructions:
 33 // - NativeInstruction
 34 // - - NativeCall
 35 // - - NativeMovConstReg
 36 // - - NativeMovConstRegPatching
 37 // - - NativeMovRegMem
 38 // - - NativeMovRegMemPatching
 39 // - - NativeJump
// - - NativeIllegalInstruction
 41 // - - NativeGeneralJump
 42 // - - NativeReturn
 43 // - - NativeReturnX (return with argument)
 44 // - - NativePushConst
 45 // - - NativeTstRegMem
 46 
 47 // The base class for different kinds of native instruction abstractions.
 48 // Provides the primitive operations to manipulate code relative to this.
 49 
// Abstract view of a native instruction. Objects of this type are never
// constructed; a raw code address is cast to NativeInstruction* (see
// nativeInstruction_at) and the accessors below read and write the
// instruction bytes relative to 'this'.
class NativeInstruction {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,   // single-byte NOP opcode
    nop_instruction_size        =    1
  };

  // Instruction classification predicates. They are implemented inline near
  // the end of this file, after the classes whose encodings they test.
  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_jump_reg();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  // Address of the byte at 'offset' from the start of this instruction.
  address addr_at(int offset) const    { return address(this) + offset; }

  // Raw readers for the instruction bytes.
  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const         { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  // Raw writers. Each one calls wrote() so that instruction-cache
  // invalidation has a single hook point.
  void set_char_at(int offset, u_char c)        { *addr_at(offset) = c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:
  // True if the first byte is the REX2 prefix (APX extended encodings).
  bool has_rex2_prefix() const { return ubyte_at(0) == Assembler::REX2; }

  inline friend NativeInstruction* nativeInstruction_at(address address);
};
 97 
 98 inline NativeInstruction* nativeInstruction_at(address address) {
 99   NativeInstruction* inst = (NativeInstruction*)address;
100 #ifdef ASSERT
101   //inst->verify();
102 #endif
103   return inst;
104 }
105 
class NativeCall;
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,   // CALL rel32
    instruction_size            =    5,   // one opcode byte + 4-byte displacement
    instruction_offset          =    0,
    displacement_offset         =    1,   // rel32 operand follows the opcode
    return_address_offset       =    5    // the address pushed by the call
  };

  static int byte_size()                    { return instruction_size; }
  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  // Signed 32-bit displacement, relative to the return address.
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  // Repoint the call at 'dest'. Not MT-safe; use set_destination_mt_safe
  // if other threads may be executing this code concurrently.
  void  set_destination(address dest)       {
#ifdef AMD64
    // On x64 the target must still be reachable via a rel32 displacement.
    intptr_t disp = dest - return_address();
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
    set_int_at(displacement_offset, (int)(dest - return_address()));
  }
  // Returns whether the 4-byte displacement operand is 4-byte aligned.
  bool  is_displacement_aligned();
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() { assert(is_displacement_aligned(), "displacement of call is not aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  // True if the byte at 'instr' is the CALL rel32 opcode.
  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  // True if a CALL rel32 immediately precedes 'return_address'.
  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  // True if 'instr' is a CALL rel32 whose resolved destination is 'target'.
  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
165 
166 inline NativeCall* nativeCall_at(address address) {
167   NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
168 #ifdef ASSERT
169   call->verify();
170 #endif
171   return call;
172 }
173 
174 inline NativeCall* nativeCall_before(address return_address) {
175   NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
176 #ifdef ASSERT
177   call->verify();
178 #endif
179   return call;
180 }
181 
// Call with target address in a general purpose register (indirect absolute addressing).
// Encoding : FF /2  CALL r/m32
// Primary Opcode: FF
// Opcode Extension(part of ModRM.REG): /2
// Operand ModRM.RM  = r/m32
class NativeCallReg: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xFF,
    instruction_offset          =    0,
    return_address_offset_norex =    2,   // opcode + ModRM
    return_address_offset_rex   =    3,   // 1-byte REX prefix + opcode + ModRM
    return_address_offset_rex2  =    4    // 2-byte REX2 prefix + opcode + ModRM
  };

  // Instruction length depends on which (if any) prefix precedes the opcode.
  int next_instruction_offset() const  {
    if (ubyte_at(0) == NativeCallReg::instruction_code) {
      return return_address_offset_norex;
    } else if (has_rex2_prefix()) {
      return return_address_offset_rex2;
    } else {
      assert((ubyte_at(0) & 0xF0) ==  Assembler::REX, "");
      return return_address_offset_rex;
    }
  }
};
208 
// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32bit data dll calls, etc.)
// Instruction format for implied addressing mode immediate operand move to register instruction:
//  [REX/REX2] [OPCODE] [IMM32]
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  // On x64 the mov carries a one-byte REX or two-byte REX2 prefix and a
  // word-sized (64-bit) immediate.
  static const bool has_rex = true;
  static const int rex_size = 1;
  static const int rex2_size = 2;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
  static const int rex2_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code             = 0xB8,  // MOV r, imm; low 3 opcode bits select the register
    instruction_offset           =    0,
    instruction_size_rex         =    1 + rex_size + wordSize,
    instruction_size_rex2        =    1 + rex2_size + wordSize,
    data_offset_rex              =    1 + rex_size,
    data_offset_rex2             =    1 + rex2_size,
    next_instruction_offset_rex  =    instruction_size_rex,
    next_instruction_offset_rex2 =    instruction_size_rex2,
    register_mask                = 0x07  // mask for the register bits in the opcode
  };

  // All sizes/offsets depend on whether this particular instance carries a
  // REX2 prefix (one byte longer than the REX form).
  int instruction_size() const              { return has_rex2_prefix() ? instruction_size_rex2 : instruction_size_rex; }
  int next_inst_offset() const              { return has_rex2_prefix() ? next_instruction_offset_rex2 : next_instruction_offset_rex; }
  int data_byte_offset() const              { return has_rex2_prefix() ? data_offset_rex2 : data_offset_rex;}
  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_inst_offset()); }
  // Read/patch the word-sized immediate operand.
  intptr_t data() const                     { return ptr_at(data_byte_offset()); }
  void  set_data(intptr_t x)                { set_ptr_at(data_byte_offset(), x); }

  void  verify();
  void  print();

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};
251 
252 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
253   NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
254 #ifdef ASSERT
255   test->verify();
256 #endif
257   return test;
258 }
259 
// Returns the mov-const instruction that ends at 'address'.
// NOTE(review): the REX2 check reads the byte at 'address', which is the
// first byte of the *following* instruction, not of the mov being backed
// over -- confirm this is the intended way to detect the REX2-encoded form.
inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  int instruction_size = ((NativeInstruction*)(address))->has_rex2_prefix() ?
                                  NativeMovConstReg::instruction_size_rex2 :
                                  NativeMovConstReg::instruction_size_rex;
  NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
270 
271 class NativeMovConstRegPatching: public NativeMovConstReg {
272  private:
273     friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
274     NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
275     #ifdef ASSERT
276       test->verify();
277     #endif
278     return test;
279   }
280 };
281 
// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.  For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction.  This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide_lo          = Assembler::REX,
    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,

    // Legacy encoding MAP1 instructions promotable to REX2 encoding.
    instruction_code_mem2reg_movslq     = 0x63,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,

    // Legacy encoding MAP0 instructions promotable to REX2 encoding.
    instruction_code_reg2mem            = 0x89,
    instruction_code_mem2reg            = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_lea                = 0x8d,

    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,

    // VEX/EVEX/Legacy encoded MAP1 instructions promotable to REX2 encoding.
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,

    instruction_code_xmm_code           = 0x0f,

    // Address operand load/store/ldp are promotable to REX2 to accommodate
    // extended SIB encoding.
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_VEX_prefix_2bytes       = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,
    instruction_EVEX_prefix_4bytes      = Assembler::EVEX_4bytes,
    instruction_REX2_prefix             = Assembler::REX2,

    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset_rex         = 4,
    next_instruction_offset_rex2        = 5
  };

  // helper
  // Offset from 'this' to the first byte of the move proper (skips prefixes
  // and any leading xor emitted by the load macros); defined in the .cpp.
  int instruction_start() const;

  address instruction_address() const {
    return addr_at(instruction_start());
  }

  // Number of bytes from 'this' to the end of the 32-bit offset operand.
  int num_bytes_to_end_of_patch() const {
    return patch_offset() + sizeof(jint);
  }

  // Read the 32-bit offset operand.
  int offset() const {
    return int_at(patch_offset());
  }

  // Overwrite the 32-bit offset operand.
  void set_offset(int x) {
    set_int_at(patch_offset(), x);
  }

  // Adjust the 32-bit offset operand by 'add_offset'.
  void add_offset_in_bytes(int add_offset) {
    int patch_off = patch_offset();
    set_int_at(patch_off, int_at(patch_off) + add_offset);
  }

  void verify();
  void print ();

 private:
  // Offset from 'this' to the patchable 32-bit operand; defined in the .cpp.
  int patch_offset() const;
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
379 
380 inline NativeMovRegMem* nativeMovRegMem_at (address address) {
381   NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
382 #ifdef ASSERT
383   test->verify();
384 #endif
385   return test;
386 }
387 
388 
// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide             = Assembler::REX_W,
    instruction_prefix_wide_extended    = Assembler::REX_WB,
    lea_instruction_code                = 0x8D,   // LEA r, m
    mov64_instruction_code              = 0xB8    // MOV r64, imm64
  };

  void verify();
  void print ();

 private:
  // View the lea instruction starting at 'address'; verified in debug builds.
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
420 
// An interface to the unconditional "jmp rel32" instruction (opcode 0xE9).
class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,   // JMP rel32
    instruction_size            =    5,   // opcode + 4-byte displacement
    instruction_offset          =    0,
    data_offset                 =    1,   // rel32 operand follows the opcode
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  // Resolved target; a jump to self (the unresolved state) is reported as -1.
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
     // 32bit used to encode unresolved jmp as jmp -1
     // 64bit can't produce this so it used jump to self.
     // Now 32bit and 64bit use jump to self as the unresolved address
     // which the inline cache code (and relocs) know about

     // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  // Patch the jump to target 'dest'; 'dest' == -1 encodes the unresolved
  // state as a jump to self (displacement -5).
  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
#ifdef AMD64
    // NOTE(review): labs() takes a C 'long'; on LLP64 targets that would
    // truncate a 64-bit val -- confirm the range check still holds there.
    assert((labs(val)  & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
467 
468 inline NativeJump* nativeJump_at(address address) {
469   NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
470 #ifdef ASSERT
471   jump->verify();
472 #endif
473   return jump;
474 }
475 
// Handles all kinds of jump on Intel. Long/far, conditional/unconditional with relative offsets
// barring register indirect jumps.
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // The constants below do not fully apply, since the lengths and offsets
    // depend on the actual jump used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  // Resolved target of the jump; defined in the .cpp.
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
503 
504 inline NativeGeneralJump* nativeGeneralJump_at(address address) {
505   NativeGeneralJump* jump = (NativeGeneralJump*)(address);
506   debug_only(jump->verify();)
507   return jump;
508 }
509 
// A two-byte illegal instruction, used to force a trap at a code position.
class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F,    // Real byte order is: 0x0F, 0x0B (UD2)
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};
522 
// return instruction that does not pop values of the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,   // RET
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};
533 
// return instruction that does pop values of the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,   // RET imm16
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};
544 
// Simple test vs memory (used by is_safepoint_poll below).
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,   // high nibble shared by all one-byte REX prefixes
    instruction_rex_prefix      = Assembler::REX,
    instruction_rex_b_prefix    = Assembler::REX_B,
    instruction_code_memXregl   = 0x85,   // TEST r/m32, r32
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};
557 
// UD2 is the two bytes 0x0F, 0x0B; compare them as a little-endian short.
inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
// CALL r/m: opcode 0xFF either first, or after a one-byte REX/REX.B prefix.
inline bool NativeInstruction::is_call_reg()     { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                          (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                           (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
// RET (0xC3) or RET imm16 (0xC2).
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
// JMP rel32 (0xE9) or JMP rel8 (0xEB).
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
567 inline bool NativeInstruction::is_jump_reg()     {
568   int pos = 0;
569   if (ubyte_at(0) == Assembler::REX_B) pos = 1;
570   return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
571 }
// Jcc rel32 (0x0F 0x8x, matched by masking the two low bytes of the first
// dword) or Jcc rel8 (0x7x).
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
574 inline bool NativeInstruction::is_safepoint_poll() {
575 #ifdef AMD64
576   const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
577   const int test_offset = has_rex2_prefix() ? 2 : (has_rex_prefix ? 1 : 0);
578 #else
579   const int test_offset = 0;
580 #endif
581   const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
582   const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
583   return is_test_opcode && is_rax_target;
584 }
585 
// True if this is "mov r64, imm64" (opcode 0xB8+reg) carrying a W-bearing
// REX or REX2 prefix; always false on 32-bit.
inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  bool valid_rex_prefix  = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
  bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2  &&
       (ubyte_at(1) == Assembler::REX2BIT_W  ||
        ubyte_at(1) == Assembler::REX2BIT_WB ||
        ubyte_at(1) == Assembler::REX2BIT_WB4);

  // Mask off the low 3 opcode bits, which select the destination register.
  int opcode = has_rex2_prefix() ? ubyte_at(2) : ubyte_at(1);
  return ((valid_rex_prefix || valid_rex2_prefix) &&  (opcode & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}
600 
// An 8-byte NOP placed after a call site (bytes 0F 1F 84 00 <disp32>,
// i.e. "nop dword ptr [rax+rax+disp32]"). The disp32 field can be patched
// to record the oopmap slot and code-blob offset for the call.
class NativePostCallNop: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code = 0x0f,
    instruction_size = 8,
    instruction_offset = 0,
    displacement_offset = 4   // start of the patchable disp32 field
  };

  // Matches the fixed leading four bytes 0F 1F 84 00.
  bool check() const { return int_at(0) == 0x841f0f; }
  // Unpack the disp32 field: low 24 bits = code-blob offset, high 8 bits =
  // oopmap slot. Returns false while the field is still zero (nothing encoded).
  bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {
    int32_t data = int_at(displacement_offset);
    if (data == 0) {
      return false; // no information encoded
    }
    cb_offset = (data & 0xffffff);
    oopmap_slot = (data >> 24) & 0xff;
    return true; // decoding succeeded
  }
  bool patch(int32_t oopmap_slot, int32_t cb_offset);
  void make_deopt();
};
623 
624 inline NativePostCallNop* nativePostCallNop_at(address address) {
625   NativePostCallNop* nop = (NativePostCallNop*) address;
626   if (nop->check()) {
627     return nop;
628   }
629   return nullptr;
630 }
631 
632 inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) {
633   NativePostCallNop* nop = (NativePostCallNop*) address;
634   assert(nop->check(), "");
635   return nop;
636 }
637 
// A three-byte illegal-instruction sequence starting with 0x0F 0xFF, used to
// mark a deoptimization point in compiled code.
class NativeDeoptInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix          = 0x0F,
    instruction_code            = 0xFF,
    instruction_size            =    3,
    instruction_offset          =    0,
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  void  verify();

  // True if the two bytes at 'instr' are 0x0F 0xFF.
  static bool is_deopt_at(address instr) {
    return ((*instr) & 0xFF) == NativeDeoptInstruction::instruction_prefix &&
      ((*(instr+1)) & 0xFF) == NativeDeoptInstruction::instruction_code;
  }

  // MT-safe patching
  static void insert(address code_pos, bool invalidate = true);
};
660 
661 #endif // CPU_X86_NATIVEINST_X86_HPP