/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_NATIVEINST_X86_HPP
#define CPU_X86_NATIVEINST_X86_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointMechanism.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
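//
// Typical use is to map a raw code address onto one of these views and then
// inspect or patch the instruction through its accessors. A minimal sketch
// (illustrative only; 'pc' is a hypothetical address into generated code):
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_nop()) {
//     // a single-byte 0x90 nop sits at pc
//   }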

class NativeInstruction {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code = 0x90,
    nop_instruction_size = 1
  };

  bool is_nop() { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_jump_reg();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  address addr_at(int offset) const { return address(this) + offset; }

  s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const { return *(intptr_t*) addr_at(offset); }

  oop oop_at(int offset) const { return *(oop*) addr_at(offset); }

  void set_char_at(int offset, u_char c)    { *addr_at(offset) = c; wrote(offset); }
  void set_int_at(int offset, jint i)       { *(jint*) addr_at(offset) = i; wrote(offset); }
  void set_ptr_at(int offset, intptr_t ptr) { *(intptr_t*) addr_at(offset) = ptr; wrote(offset); }
  void set_oop_at(int offset, oop o)        { *(oop*) addr_at(offset) = o; wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:
  bool has_rex2_prefix() const { return ubyte_at(0) == Assembler::REX2; }

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}

class NativeCall;
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code      = 0xE8,
    instruction_size      = 5,
    instruction_offset    = 0,
    displacement_offset   = 1,
    return_address_offset = 5
  };

  static int byte_size()                    { return instruction_size; }
  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int     displacement() const              { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  void    set_destination(address dest) {
    intptr_t disp = dest - return_address();
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
    set_int_at(displacement_offset, (int)(dest - return_address()));
  }
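  // Note: the rel32 displacement is relative to the end of the 5-byte call,
  // i.e. to return_address(). Worked example with hypothetical addresses: a call
  // encoded at 0x1000 targeting 0x2000 stores disp = 0x2000 - 0x1005 = 0xFFB.
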
  // Returns whether the 4-byte displacement operand is 4-byte aligned.
  bool is_displacement_aligned();
  void set_destination_mt_safe(address dest);

  void verify_alignment() { assert(is_displacement_aligned(), "displacement of call is not aligned"); }
  void verify();
  void print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
           nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// Call with target address in a general purpose register (indirect absolute addressing).
// Encoding: FF /2 CALL r/m32
//   Primary Opcode: FF
//   Opcode Extension (part of ModRM.REG): /2
//   Operand: ModRM.RM = r/m32
class NativeCallReg: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xFF,
    instruction_offset          = 0,
    return_address_offset_norex = 2,
    return_address_offset_rex   = 3,
    return_address_offset_rex2  = 4
  };

  int next_instruction_offset() const {
    if (ubyte_at(0) == NativeCallReg::instruction_code) {
      return return_address_offset_norex;
    } else if (has_rex2_prefix()) {
      return return_address_offset_rex2;
    } else {
      assert((ubyte_at(0) & 0xF0) == Assembler::REX, "");
      return return_address_offset_rex;
    }
  }
};
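
// Usage sketch (illustrative only; 'call_site' and 'new_entry' are hypothetical
// addresses supplied by the caller): retarget an existing 0xE8 rel32 call,
// assuming its displacement is suitably aligned for concurrent patching:
//
//   if (NativeCall::is_call_at(call_site)) {
//     NativeCall* call = nativeCall_at(call_site);
//     call->verify_alignment();
//     call->set_destination_mt_safe(new_entry);
//   }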

// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32bit data dll calls, etc.)
// Instruction format for implied addressing mode immediate operand move to register instruction:
//   [REX/REX2] [OPCODE] [IMM32]
class NativeMovConstReg: public NativeInstruction {
  static const bool has_rex   = true;
  static const int  rex_size  = 1;
  static const int  rex2_size = 2;
 public:
  enum Intel_specific_constants {
    instruction_code             = 0xB8,
    instruction_offset           = 0,
    instruction_size_rex         = 1 + rex_size + wordSize,
    instruction_size_rex2        = 1 + rex2_size + wordSize,
    data_offset_rex              = 1 + rex_size,
    data_offset_rex2             = 1 + rex2_size,
    next_instruction_offset_rex  = instruction_size_rex,
    next_instruction_offset_rex2 = instruction_size_rex2,
    register_mask                = 0x07
  };

  int instruction_size() const { return has_rex2_prefix() ? instruction_size_rex2 : instruction_size_rex; }
  int next_inst_offset() const { return has_rex2_prefix() ? next_instruction_offset_rex2 : next_instruction_offset_rex; }
  int data_byte_offset() const { return has_rex2_prefix() ? data_offset_rex2 : data_offset_rex; }

  address instruction_address() const      { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(next_inst_offset()); }

  intptr_t data() const     { return ptr_at(data_byte_offset()); }
  void set_data(intptr_t x) { set_ptr_at(data_byte_offset(), x); }

  void verify();
  void print();

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  int instruction_size = ((NativeInstruction*)(address))->has_rex2_prefix() ?
                         NativeMovConstReg::instruction_size_rex2 :
                         NativeMovConstReg::instruction_size_rex;
  NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
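
// Usage sketch (illustrative only; 'site' is a hypothetical address of a
// mov reg, imm instruction and 'new_value' a hypothetical constant): rewrite
// the inlined immediate in place:
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(site);
//   intptr_t old_value = mov->data();
//   mov->set_data(new_value);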

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros. For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction. This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide_lo      = Assembler::REX,
    instruction_prefix_wide_hi      = Assembler::REX_WRXB,
    instruction_code_xor            = 0x33,
    instruction_extended_prefix     = 0x0F,

    // Legacy encoding MAP1 instructions promotable to REX2 encoding.
    instruction_code_mem2reg_movslq = 0x63,
    instruction_code_mem2reg_movzxb = 0xB6,
    instruction_code_mem2reg_movsxb = 0xBE,
    instruction_code_mem2reg_movzxw = 0xB7,
    instruction_code_mem2reg_movsxw = 0xBF,
    instruction_operandsize_prefix  = 0x66,

    // Legacy encoding MAP0 instructions promotable to REX2 encoding.
    instruction_code_reg2mem        = 0x89,
    instruction_code_mem2reg        = 0x8b,
    instruction_code_reg2memb       = 0x88,
    instruction_code_mem2regb       = 0x8a,
    instruction_code_lea            = 0x8d,

    instruction_code_float_s        = 0xd9,
    instruction_code_float_d        = 0xdd,
    instruction_code_long_volatile  = 0xdf,

    // VEX/EVEX/Legacy encoded MAP1 instructions promotable to REX2 encoding.
    instruction_code_xmm_ss_prefix  = 0xf3,
    instruction_code_xmm_sd_prefix  = 0xf2,

    instruction_code_xmm_code       = 0x0f,

    // Address operand load/store/lpd are promotable to REX2 to accommodate
    // extended SIB encoding.
    instruction_code_xmm_load       = 0x10,
    instruction_code_xmm_store      = 0x11,
    instruction_code_xmm_lpd        = 0x12,

    instruction_VEX_prefix_2bytes   = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes   = Assembler::VEX_3bytes,
    instruction_EVEX_prefix_4bytes  = Assembler::EVEX_4bytes,
    instruction_REX2_prefix         = Assembler::REX2,

    instruction_offset              = 0,
    data_offset                     = 2,
    next_instruction_offset_rex     = 4,
    next_instruction_offset_rex2    = 5
  };

  // helper
  int instruction_start() const;

  address instruction_address() const {
    return addr_at(instruction_start());
  }

  int num_bytes_to_end_of_patch() const {
    return patch_offset() + sizeof(jint);
  }

  int offset() const {
    return int_at(patch_offset());
  }

  void set_offset(int x) {
    set_int_at(patch_offset(), x);
  }

  void add_offset_in_bytes(int add_offset) {
    int patch_off = patch_offset();
    set_int_at(patch_off, int_at(patch_off) + add_offset);
  }

  void verify();
  void print();

 private:
  int patch_offset() const;
  inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
  static const bool has_rex  = true;
  static const int  rex_size = 1;
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide          = Assembler::REX_W,
    instruction_prefix_wide_extended = Assembler::REX_WB,
    lea_instruction_code             = 0x8D,
    mov64_instruction_code           = 0xB8
  };

  void verify();
  void print();

 private:
  friend NativeLoadAddress* nativeLoadAddress_at(address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
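
// Usage sketch for NativeMovRegMem (illustrative only; 'site' is a hypothetical
// address of a reg<->mem move emitted by the macro assembler): adjust the
// memory operand's 32-bit displacement once the final offset is known:
//
//   NativeMovRegMem* mv = nativeMovRegMem_at(site);
//   mv->add_offset_in_bytes(8);   // 8 is a hypothetical extra offset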

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code        = 0xe9,
    instruction_size        = 5,
    instruction_offset      = 0,
    data_offset             = 1,
    next_instruction_offset = 5
  };

  address instruction_address() const      { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(next_instruction_offset); }
  address jump_destination() const {
    address dest = (int_at(data_offset) + next_instruction_address());
    // 32bit used to encode unresolved jmp as jmp -1
    // 64bit can't produce this so it used jump to self.
    // Now 32bit and 64bit use jump to self as the unresolved address
    // which the inline cache code (and relocs) know about.

    // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  void set_jump_destination(address dest) {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
    assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address) -1, "must be 32bit offset or -1");
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// Handles all kinds of jump on Intel. Long/far, conditional/unconditional with relative offsets,
// barring register indirect jumps.
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Constants do not apply, since the lengths and offsets depend on the actual jump
    // used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9   (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size         = 5
  };

  address instruction_address() const { return addr_at(0); }
  address jump_destination() const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  DEBUG_ONLY(jump->verify();)
  return jump;
}

class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code        = 0x0B0F, // Real byte order is: 0x0F, 0x0B
    instruction_size        = 2,
    instruction_offset      = 0,
    next_instruction_offset = 2
  };

  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};

// Return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code        = 0xC3,
    instruction_size        = 1,
    instruction_offset      = 0,
    next_instruction_offset = 1
  };
};

// Return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code        = 0xC2,
    instruction_size        = 2,
    instruction_offset      = 0,
    next_instruction_offset = 2
  };
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,
    instruction_rex_prefix      = Assembler::REX,
    instruction_rex_b_prefix    = Assembler::REX_B,
    instruction_code_memXregl   = 0x85,
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};
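
// Note: the safepoint poll is recognized here purely by shape: a "test" opcode
// (0x85) whose ModRM.reg field selects rax, optionally preceded by a REX.B or
// REX2 prefix; these are the constants is_safepoint_poll() below matches.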

inline bool NativeInstruction::is_illegal()  { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()     { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_call_reg() { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                      (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                       (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
inline bool NativeInstruction::is_return()   { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                      ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()     { return ubyte_at(0) == NativeJump::instruction_code ||
                                                      ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_jump_reg() {
  int pos = 0;
  if (ubyte_at(0) == Assembler::REX_B) pos = 1;
  return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
}
inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                       (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
  const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
  const int test_offset = has_rex2_prefix() ? 2 : (has_rex_prefix ? 1 : 0);
  const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
  const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
  return is_test_opcode && is_rax_target;
}

inline bool NativeInstruction::is_mov_literal64() {
  bool valid_rex_prefix  = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
  bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
                           (ubyte_at(1) == Assembler::REX2BIT_W ||
                            ubyte_at(1) == Assembler::REX2BIT_WB ||
                            ubyte_at(1) == Assembler::REX2BIT_WB4);

  int opcode = has_rex2_prefix() ? ubyte_at(2) : ubyte_at(1);
  return ((valid_rex_prefix || valid_rex2_prefix) && (opcode & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
}
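
// Dispatch sketch (illustrative only; 'pc' and 'new_dest' are hypothetical
// values supplied by the caller): classify the instruction at a code address
// before choosing a patching strategy:
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call()) {
//     nativeCall_at(pc)->set_destination_mt_safe(new_dest);
//   } else if (ni->is_jump()) {
//     nativeJump_at(pc)->set_jump_destination(new_dest);
//   }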

class NativePostCallNop: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code    = 0x0f,
    instruction_size    = 8,
    instruction_offset  = 0,
    displacement_offset = 4
  };

  bool check() const { return int_at(0) == 0x841f0f; }
  bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {
    int32_t data = int_at(displacement_offset);
    if (data == 0) {
      return false; // no information encoded
    }
    cb_offset = (data & 0xffffff);
    oopmap_slot = (data >> 24) & 0xff;
    return true; // decoding succeeded
  }
  bool patch(int32_t oopmap_slot, int32_t cb_offset);
  void make_deopt();
};

inline NativePostCallNop* nativePostCallNop_at(address address) {
  NativePostCallNop* nop = (NativePostCallNop*) address;
  if (nop->check()) {
    return nop;
  }
  return nullptr;
}

inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) {
  NativePostCallNop* nop = (NativePostCallNop*) address;
  assert(nop->check(), "");
  return nop;
}

class NativeDeoptInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix = 0x0F,
    instruction_code   = 0xFF,
    instruction_size   = 3,
    instruction_offset = 0,
  };

  address instruction_address() const      { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(instruction_size); }

  void verify();

  static bool is_deopt_at(address instr) {
    return ((*instr) & 0xFF) == NativeDeoptInstruction::instruction_prefix &&
           ((*(instr+1)) & 0xFF) == NativeDeoptInstruction::instruction_code;
  }

  // MT-safe patching
  static void insert(address code_pos, bool invalidate = true);
};

#endif // CPU_X86_NATIVEINST_X86_HPP