1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "nativeInst_aarch32.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/handles.hpp"
  34 #include "runtime/sharedRuntime.hpp"
  35 #include "runtime/stubRoutines.hpp"
  36 #include "utilities/ostream.hpp"
  37 #ifdef COMPILER1
  38 #include "c1/c1_Runtime1.hpp"
  39 #endif
  40 
// LIRAssembler fills patching site with nops up to NativeCall::instruction_size.
// Defaults to the worst-case 5-instruction sequence; NativeCall::init() lowers
// it to 3 instructions on ARMv6T2/ARMv7 cores (see init() in this file).
int NativeCall::instruction_size = 5 * arm_insn_sz;
  43 
  44 NativeInstruction* NativeInstruction::from(address addr) {
  45   return (NativeInstruction*) addr;
  46 }
  47 
  48 //-------------------------------------------------------------------
  49 
  50 void NativeCall::init() {
  51   instruction_size = (VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7) ? 3 : 5) * arm_insn_sz;
  52 }
  53 
  54 void NativeCall::verify() {
  55   if (!is_call()) {
  56     fatal("not a call");
  57   }
  58 }
  59 
  60 address NativeCall::destination() const {
  61   assert(is_call(), "not a call");
  62   if (NativeImmCall::is_at(addr())) {
  63     return NativeImmCall::from(addr())->destination();
  64   } else if (NativeMovConstReg::is_at(addr())) {
  65     return address(NativeMovConstReg::from(addr())->data());
  66   } else if (NativeTrampolineCall::is_at(addr())) {
  67     return NativeTrampolineCall::from(addr())->destination();
  68   }
  69   ShouldNotReachHere();
  70   return NULL;
  71 }
  72 
  73 void NativeCall::set_destination(address dest) {
  74   assert(is_call(), "not a call");
  75   if (NativeImmCall::is_at(addr())) {
  76     NativeImmCall::from(addr())->set_destination(dest);
  77   } else if (NativeMovConstReg::is_at(addr())) {
  78     NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  79   } else if (NativeTrampolineCall::is_at(addr())) {
  80     NativeTrampolineCall::from(addr())->set_destination(dest);
  81   } else {
  82     ShouldNotReachHere();
  83   }
  84 }
  85 
  86 void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  87   assert(is_call(), "not a call");
  88 
  89   // patching should be not only safe (i.e. this call could be executed by some thread),
  90   // but it also should be atomic (some other thread could call NativeCall::destination()
  91   // and see valid destination value)
  92 
  93   if (NativeImmCall::is_at(addr())) {
  94     NativeImmCall::from(addr())->set_destination(dest);
  95     ICache::invalidate_word(addr());
  96   } else if (NativeTrampolineCall::is_at(addr())) {
  97     NativeTrampolineCall::from(addr())->set_destination_mt_safe(dest);
  98   } else {
  99     ShouldNotReachHere();
 100   }
 101 }
 102 
// Inserting a raw call at an arbitrary position is not supported on this port.
void NativeCall::insert(address code_pos, address entry) {
  Unimplemented();
}
 106 
 107 bool NativeCall::is_call_before(address return_address) {
 108   if (NativeTrampolineCall::is_at(return_address - NativeCall::instruction_size)) {
 109     return true;
 110   } else if (NativeMovConstReg::is_at(return_address - NativeCall::instruction_size)) {
 111     NativeMovConstReg *nm = NativeMovConstReg::from(return_address - NativeCall::instruction_size);
 112     address next_instr = nm->next_instruction_address();
 113     return NativeRegCall::is_at(next_instr) && NativeRegCall::from(next_instr)->destination() == nm->destination();
 114   } else if (NativeImmCall::is_at(return_address - NativeBranchType::instruction_size)) {
 115     return true;
 116   }
 117   return false;
 118 }
 119 
 120 address NativeCall::next_instruction_address() const {
 121   assert(is_call(), "not a call");
 122   if (NativeImmCall::is_at(addr())) {
 123     return NativeImmCall::from(addr())->next_instruction_address();
 124   } else if (NativeMovConstReg::is_at(addr())) {
 125     NativeMovConstReg *nm = NativeMovConstReg::from(addr());
 126     address next_instr = nm->next_instruction_address();
 127     assert(NativeRegCall::is_at(next_instr), "should be");
 128     return NativeRegCall::from(next_instr)->next_instruction_address();
 129   } else if (NativeTrampolineCall::is_at(addr())) {
 130     return NativeTrampolineCall::from(addr())->next_instruction_address();
 131   } else {
 132     ShouldNotReachHere();
 133     return NULL;
 134   }
 135 }
 136 
// The return address of a call is simply the first instruction following the
// (possibly multi-instruction) call sequence.
address NativeCall::return_address() const {
  return next_instruction_address();
}
 140 
 141 bool NativeCall::is_at(address addr) {
 142   if (NativeImmCall::is_at(addr)) {
 143     return true;
 144   } else if (NativeMovConstReg::is_at(addr)) {
 145     NativeMovConstReg *nm = NativeMovConstReg::from(addr);
 146     address next_instr = nm->next_instruction_address();
 147     return NativeRegCall::is_at(next_instr) &&
 148       NativeRegCall::from(next_instr)->destination() == nm->destination();
 149   } else if (NativeTrampolineCall::is_at(addr)) {
 150     return true;
 151   }
 152   return false;
 153 }
 154 
 155 NativeCall* NativeCall::from(address addr) {
 156   assert(NativeCall::is_at(addr), "");
 157   return (NativeCall*) addr;
 158 }
 159 
 160 //-------------------------------------------------------------------
 161 
 162 address NativeTrampolineCall::destination() const {
 163   assert(is_at(addr()), "not call");
 164   return (address) uint_at(8);
 165 }
 166 
 167 void NativeTrampolineCall::set_destination(address dest) {
 168   assert(is_at(addr()), "not call");
 169   set_uint_at(8, (uintptr_t) dest);
 170 }
 171 
// MT-safe re-targeting: the target lives in a data slot (not an instruction),
// so a single word store is the whole update.
void NativeTrampolineCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_at(addr()), "not call");
  set_destination(dest);
  // FIXME invalidate data cache
}
 177 
 178 bool NativeTrampolineCall::is_at(address addr) {
 179   return (as_uint(addr    ) & ~0xffu) == 0xe28fe000 // add     lr, pc, #disp
 180        && as_uint(addr + 4)          == 0xe51ff004; // ldr     pc, [pc, -4]
 181 }
 182 
 183 NativeTrampolineCall* NativeTrampolineCall::from(address addr) {
 184   assert(NativeTrampolineCall::is_at(addr), "");
 185   return (NativeTrampolineCall*) addr;
 186 }
 187 
 188 //-------------------------------------------------------------------
 189 
 190 address NativeImmCall::destination() const {
 191   assert(is_imm_call(), "not call");
 192   uint32_t insn = as_uint();
 193   intptr_t off = Instruction_aarch32::sextract(insn, 23, 0);
 194   address destination = addr() + 8 + (off << 2);
 195   return destination;
 196 }
 197 
// Re-encode the 24-bit pc-relative branch offset to reach dest.
void NativeImmCall::set_destination(address dest) {
  assert(is_imm_call(), "not call");
  patch_offset_to(dest);
}
 202 
 203 bool NativeImmCall::is_at(address addr) {
 204   return Instruction_aarch32::extract(as_uint(addr), 27, 24)  == 0b1011;
 205 }
 206 
 207 NativeImmCall* NativeImmCall::from(address addr) {
 208   assert(NativeImmCall::is_at(addr), "");
 209   return (NativeImmCall*) addr;
 210 }
 211 
 212 //-------------------------------------------------------------------
 213 
 214 Register NativeRegCall::destination() const {
 215   assert(is_reg_call(), "not call");
 216   return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
 217 }
 218 
 219 bool NativeRegCall::is_at(address addr) {
 220   unsigned insn = as_uint(addr);
 221   return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0011;
 222 }
 223 
 224 NativeRegCall* NativeRegCall::from(address addr) {
 225   assert(NativeRegCall::is_at(addr), "");
 226   return (NativeRegCall*) addr;
 227 }
 228 
 229 //-------------------------------------------------------------------
 230 
 231 address NativeFarLdr::skip_patching_prolog(address addr) {
 232   if (NativeInstruction::from(addr)->is_nop() &&
 233       NativeInstruction::from(addr + arm_insn_sz)->is_barrer()) {
 234     return addr+2*arm_insn_sz;
 235   }
 236   return addr;
 237 }
 238 
 239 bool NativeFarLdr::is_at(address addr) {
 240   addr = skip_patching_prolog(addr);
 241   unsigned add_condidate = as_uint(addr);
 242   if (((Instruction_aarch32::extract(add_condidate, 27, 21)  != 0b0010100) /*add*/ &&
 243         (Instruction_aarch32::extract(add_condidate, 27, 21) != 0b0010010) /*sub*/) ||
 244       (Instruction_aarch32::extract(add_condidate, 19, 16) != (unsigned) r15_pc->encoding())) {
 245     return false;
 246   }
 247   Register dest = as_Register(Instruction_aarch32::extract(add_condidate, 15, 12));
 248   return NativeMovConstReg::is_ldr_literal_at(addr + arm_insn_sz, dest);
 249 }
 250 
 251 NativeFarLdr* NativeFarLdr::from(address addr) {
 252   assert(is_at(addr), "");
 253   return (NativeFarLdr*) addr;
 254 }
 255 
// Compute the address of the literal slot the add/sub + ldr pair reads.
// The add/sub adjusts pc (which reads as insn address + 8) by a rotated
// imm12, and the ldr applies a further 12-bit offset.
intptr_t* NativeFarLdr::data_addr() {
  address self = skip_patching_prolog(addr());
  off_t offset = 8; // pc bias: pc reads as instruction address + 8
  off_t add_off = Assembler::decode_imm12(as_uint(self) & 0xfff);
  if (Instruction_aarch32::extract(as_uint(self), 24, 21) == 0x4) { // 0b0100 = ADD
    offset += add_off;
  } else { // otherwise SUB
    offset -= add_off;
  }
  off_t ldr_off = as_uint(self + arm_insn_sz) & 0xfff;
  // Bit 23 of the add/sub opcode is 1 for ADD, 0 for SUB; set_data_addr()
  // emits the ldr's U (up/down) bit to match, so reading it from the first
  // instruction gives the ldr's direction as well.
  if (Instruction_aarch32::extract(as_uint(self), 23, 23)) {
    offset += ldr_off;
  } else {
    offset -= ldr_off;
  }

  return (intptr_t*)(self + offset);
}
 274 
 275 void NativeFarLdr::set_data_addr(intptr_t *data_addr) {
 276   address self = skip_patching_prolog(addr());
 277   off_t offset = (address)data_addr - (self + 8);
 278   bool minus = false;
 279   if (offset < 0) {
 280     offset = -offset;
 281     minus = true;
 282   }
 283   guarantee((0 <= offset) && (offset <= 0xffffff), "offset too large");
 284   set_uint_at(self - addr(), (as_uint(self) & ~0xc00fff) |
 285     (minus ? 0x400000u /*sub*/ : 0x800000u /*add*/) |
 286     Assembler::encode_imm12(offset & 0xff000));
 287 
 288   set_uint_at(self - addr() + arm_insn_sz,
 289       (as_uint(self + arm_insn_sz) & ~0x800fff) |
 290       (minus ? 0x000000 : 0x800000) |
 291       (offset & 0xfff));
 292   ICache::invalidate_range(self, 2*arm_insn_sz);
 293 }
 294 
 295 address NativeFarLdr::next_instruction_address() const {
 296   return skip_patching_prolog(addr()) + NativeMovConstReg::far_ldr_sz;
 297 }
 298 
 299 //-------------------------------------------------------------------
 300 
 301 void NativeMovConstReg::verify() {
 302   if (!is_mov_const_reg()) {
 303     fatal("not a mov const reg");
 304   }
 305 }
 306 
 307 intptr_t NativeMovConstReg::data() const {
 308   if (NativeFarLdr::is_at(addr())) {
 309     return *NativeFarLdr::from(addr())->data_addr();
 310   }
 311   return (intptr_t) MacroAssembler::target_addr_for_insn(addr());
 312 }
 313 
 314 void NativeMovConstReg::set_data(intptr_t x) {
 315   if (NativeFarLdr::is_at(addr())) {
 316     *NativeFarLdr::from(addr())->data_addr() = x;
 317     // Fences should be provided by calling code!
 318   } else {
 319     MacroAssembler::pd_patch_instruction(addr(), (address)x);
 320     ICache::invalidate_range(addr(), max_instruction_size);
 321   }
 322 }
 323 
 324 void NativeMovConstReg::print() {
 325   tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
 326                 p2i(addr()), data());
 327 }
 328 
 329 Register NativeMovConstReg::destination() const {
 330   return (Register) Instruction_aarch32::extract(as_uint(), 15, 12);
 331 }
 332 
 333 NativeMovConstReg* NativeMovConstReg::from(address addr) {
 334   assert(NativeMovConstReg::is_at(addr), "");
 335   return (NativeMovConstReg*) addr;
 336 }
 337 
 338 bool NativeMovConstReg::is_ldr_literal_at(address addr, Register from) {
 339   unsigned insn = as_uint(addr);
 340   if (from == noreg) {
 341     return (Instruction_aarch32::extract(insn, 27, 20) & 0b11100101) == 0b01000001;
 342   }
 343   unsigned reg = from->encoding();
 344   return (Instruction_aarch32::extract(insn, 27, 16) & 0b111001011111) == (0b010000010000 | reg);
 345 }
 346 
// The far-ldr (pc-adjust + literal ldr) form of a constant load.
bool NativeMovConstReg::is_far_ldr_literal_at(address addr) {
  return NativeFarLdr::is_at(addr);
}
 350 
 351 bool NativeMovConstReg::is_movw_movt_at(address addr) {
 352   unsigned insn = as_uint(addr);
 353   unsigned insn2 = as_uint(addr + arm_insn_sz);
 354   return Instruction_aarch32::extract(insn,  27, 20) == 0b00110000 && //mov
 355          Instruction_aarch32::extract(insn2, 27, 20) == 0b00110100;   //movt
 356 }
 357 
 358 bool NativeMovConstReg::is_mov_n_three_orr_at(address addr) {
 359   return (Instruction_aarch32::extract(as_uint(addr), 27, 16) & 0b111111101111) == 0b001110100000 &&
 360           Instruction_aarch32::extract(as_uint(addr+arm_insn_sz), 27, 20) == 0b00111000 &&
 361           Instruction_aarch32::extract(as_uint(addr+2*arm_insn_sz), 27, 20) == 0b00111000 &&
 362           Instruction_aarch32::extract(as_uint(addr+3*arm_insn_sz), 27, 21) == 0b0011100;
 363 }
 364 
 365 bool NativeMovConstReg::is_at(address addr) {
 366   return is_ldr_literal_at(addr) ||
 367           is_far_ldr_literal_at(addr) ||
 368           is_movw_movt_at(addr) ||
 369           is_mov_n_three_orr_at(addr);
 370 }
 371 
 372 //-------------------------------------------------------------------
// The instruction address of this reg-mem move site.
address NativeMovRegMem::instruction_address() const {
  return addr();
}
 376 
 377 int NativeMovRegMem::offset() const  {
 378   assert(NativeMovConstReg::is_at(addr()), "no others");
 379   return NativeMovConstReg::from(addr())->data();
 380 }
 381 
 382 void NativeMovRegMem::set_offset(int x) {
 383   assert(NativeMovConstReg::is_at(addr()), "no others");
 384   NativeMovConstReg::from(addr())->set_data(x);
 385 }
 386 
// Only the mov-const-reg shape is used for reg-mem moves on this port.
void NativeMovRegMem::verify() {
  assert(NativeMovConstReg::is_at(addr()), "no others");
}
 390 
 391 //--------------------------------------------------------------------------------
 392 
 393 void NativeJump::verify() {
 394   if (!is_jump()) {
 395     fatal("not a call");
 396   }
 397 }
 398 
// No extra alignment constraints to check on aarch32;
// patch_verified_entry() asserts 4-byte alignment itself.
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}
 401 
 402 address NativeJump::jump_destination() const {
 403   assert(is_jump(), "not a call");
 404   if (NativeImmJump::is_at(addr())) {
 405     return NativeImmJump::from(addr())->destination();
 406   } else if (NativeMovConstReg::is_at(addr())) {
 407     return address(NativeMovConstReg::from(addr())->data());
 408   }
 409   ShouldNotReachHere();
 410   return NULL;
 411 }
 412 
 413 void NativeJump::set_jump_destination(address dest) {
 414   assert(is_jump(), "not a call");
 415   if (NativeImmJump::is_at(addr())) {
 416     NativeImmJump::from(addr())->set_destination(dest);
 417   } else if (NativeMovConstReg::is_at(addr())) {
 418     NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
 419   } else {
 420     ShouldNotReachHere();
 421   }
 422 }
 423 
 424 address NativeJump::next_instruction_address() const {
 425   assert(is_jump(), "not a call");
 426   if (NativeImmJump::is_at(addr())) {
 427     return NativeImmJump::from(addr())->next_instruction_address();
 428   } else if (NativeMovConstReg::is_at(addr())) {
 429     address after_move = NativeMovConstReg::from(addr())->next_instruction_address();
 430     assert(NativeRegJump::is_at(after_move), "should be jump");
 431     return NativeRegJump::from(after_move)->next_instruction_address();
 432   }
 433   ShouldNotReachHere();
 434   return NULL;
 435 }
 436 
 437 bool NativeJump::is_at(address addr) {
 438   if (NativeImmJump::is_at(addr)) {
 439     return true;
 440   }
 441   if (NativeMovConstReg::is_at(addr)) {
 442     NativeMovConstReg *nm = NativeMovConstReg::from(addr);
 443     address next_instr = nm->next_instruction_address();
 444     return NativeRegJump::is_at(next_instr) &&
 445       NativeRegJump::from(next_instr)->destination() == nm->destination();
 446   }
 447   return false;
 448 }
 449 
 450 NativeJump* NativeJump::from(address addr) {
 451   assert(NativeJump::is_at(addr), "");
 452   return (NativeJump*) addr;
 453 }
 454 
 455 // MT-safe inserting of a jump over a jump or a nop (used by
 456 // nmethod::make_not_entrant_or_zombie)
 457 
// Patch the verified entry of an nmethod so threads entering it are diverted
// to `dest` (the handle-wrong-method stub), or — when dest is out of branch
// range — hit an illegal instruction and take the SIGILL path.  Must be
// atomic with respect to concurrently executing threads.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(),
     "expected fixed destination of patch");
  assert(NativeInstruction::from(verified_entry)->is_jump_or_nop() ||
      NativeInstruction::from(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch32 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    assert((((intptr_t) dest & 0x3) == 0) && (((intptr_t) verified_entry & 0x3) == 0),
        "addresses should be aligned on 4");
    // Encode "b dest": signed 24-bit word offset, relative to pc+8.
    ptrdiff_t disp = (dest - verified_entry - 8) >> 2;
    guarantee((-(1 << 23) <= disp) && (disp < (1 << 23)), "branch overflow");

    unsigned int insn = (0b11101010 << 24) | (disp & 0xffffff);
    // Single aligned 32-bit store — the atomic patch mentioned above.
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}
 483 
 484 //-------------------------------------------------------------------
 485 
 486 bool NativeBranchType::is_branch_type(uint32_t insn) {
 487   return Instruction_aarch32::extract(insn, 27, 20) == 0b00010010 &&
 488     Instruction_aarch32::extract(insn, 19, 8) == 0b111111111111;
 489 }
 490 
 491 void NativeBranchType::patch_offset_to(address dest) {
 492   uint32_t insn = as_uint();
 493   const intptr_t off = (dest - (addr() + 8));
 494   assert((off & 3) == 0, "should be");
 495   assert(-32 * 1024 * 1024 <= off && off < 32 * 1024 * 1042,
 496       "new offset should fit in instruction");
 497 
 498   const unsigned off_mask = ((1U << 24) - 1);
 499   insn &= ~off_mask; // mask off offset part
 500   insn |= ((unsigned) off >> 2) & off_mask;
 501 
 502   set_uint(insn);
 503   ICache::invalidate_range(addr_at(0), instruction_size);
 504 }
 505 
 506 //-------------------------------------------------------------------
 507 
 508 address NativeImmJump::destination() const {
 509   assert(is_imm_jump(), "not jump");
 510   return addr() + 8 + 4 * Instruction_aarch32::sextract(as_uint(), 23, 0);
 511 }
 512 
// Re-encode the 24-bit pc-relative branch offset to reach the new target.
void NativeImmJump::set_destination(address addr) {
  assert(is_imm_jump(), "");
  patch_offset_to(addr);
}
 517 
 518 bool NativeImmJump::is_at(address addr) {
 519   unsigned insn = as_uint(addr);
 520   return Instruction_aarch32::extract(insn, 27, 24)  == 0b1010;
 521 }
 522 
 523 NativeImmJump* NativeImmJump::from(address addr) {
 524   assert(NativeImmJump::is_at(addr), "");
 525   return (NativeImmJump*) addr;
 526 }
 527 
 528 //-------------------------------------------------------------------
 529 
 530 bool NativeRegJump::is_at(address addr) {
 531   unsigned insn = as_uint(addr);
 532   return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0001;
 533 }
 534 
 535 NativeRegJump* NativeRegJump::from(address addr) {
 536   assert(NativeRegJump::is_at(addr), "");
 537   return (NativeRegJump*) addr;
 538 }
 539 
 540 Register NativeRegJump::destination() const {
 541   assert(is_reg_jump(), "");
 542   return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
 543 }
 544 
 545 //-------------------------------------------------------------------
 546 
bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as
  //
  // movw(r9, polling_page & 0xffff);
  // movt(r9, polling_page >> 16);
  // ldr(r9, [r9, #0]);
  //
  // We can rely on this instructions order until we have only C1

  const intptr_t paddr = (intptr_t)os::get_polling_page();
  const Register scratch = rscratch1;

  // This instruction must be the trailing ldr; then the mov-const sequence
  // immediately before it must have loaded the polling-page address into the
  // same scratch register.
  if (NativeInstruction::from(addr())->is_ldr(scratch, Address(scratch))) {
    NativeMovConstReg* mov_const = NativeMovConstReg::before(addr());
    return (mov_const->data() == paddr) && (mov_const->destination() == scratch);
  }

  return false;
}
 566 
 567 bool NativeInstruction::is_movt(Register dst, unsigned imm, Assembler::Condition cond) {
 568   bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110100;
 569   bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
 570   bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
 571   bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
 572   bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;
 573 
 574   return a1 && a2 && a3 && a4 && a5;
 575 }
 576 
 577 bool NativeInstruction::is_movw(Register dst, unsigned imm, Assembler::Condition cond) {
 578   bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110000;
 579   bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
 580   bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
 581   bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
 582   bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;
 583 
 584   return a1 && a2 && a3 && a4 && a5;
 585 }
 586 
 587 bool NativeInstruction::is_ldr(Register dst, Address addr, Assembler::Condition cond) {
 588     assert(addr.get_mode() == Address::imm, "unimplemented");
 589     assert(addr.get_wb_mode() == Address::off, "unimplemented");
 590     assert(addr.index() == noreg, "unimplemented");
 591     assert(addr.offset() == 0, "unimplemented");
 592 
 593     bool b0 = Instruction_aarch32::extract(uint_at(0), 24, 24) == 1; //P
 594     bool b1 = Instruction_aarch32::extract(uint_at(0), 23, 23) == 1; //U
 595     bool b2 = Instruction_aarch32::extract(uint_at(0), 21, 21) == 0; //W
 596     bool b3 = Instruction_aarch32::extract(uint_at(0), 19, 16) == (unsigned)addr.base();
 597     bool b4 = Instruction_aarch32::extract(uint_at(0), 11, 0) == 0;
 598 
 599     bool a1 = b0 && b1 && b2 && b3 && b4; //Address encoding
 600 
 601     bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
 602     bool a3 = Instruction_aarch32::extract(uint_at(0), 20, 20) == 1;
 603     bool a4 = Instruction_aarch32::extract(uint_at(0), 22, 22) == 0;
 604     bool a5 = Instruction_aarch32::extract(uint_at(0), 27, 25) == 0b010;
 605     bool a6 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;
 606 
 607     return a1 && a2 && a3 && a4 && a5 && a6;
 608 }
 609 
 610 
 611 bool NativeInstruction::is_movt() {
 612   return Instruction_aarch32::extract(int_at(0), 27, 20) == 0b00110100;
 613 }
 614 
 615 bool NativeInstruction::is_orr() {
 616   return Instruction_aarch32::extract(int_at(0), 27, 21) == 0b0011100;
 617 }
 618 
// Matches the marker planted by NativeIllegalInstruction::insert() to make a
// not-entrant/zombie method raise SIGILL.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return as_uint() == 0xe7fdeafd; // udf #0xdead
}
 622 
// Overwrite the instruction at code_pos with a guaranteed-undefined encoding
// so execution there raises SIGILL (used for not-entrant/zombie marking).
void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xe7fdeafd; // udf #0xdead
}
 626 
 627 //-------------------------------------------------------------------
 628 
// Nothing to verify for a general jump on this port.
void NativeGeneralJump::verify() {  }
 630 
 631 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
 632   NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;
 633   assert(n_jump->is_nop() || n_jump->is_imm_jump(), "not overwrite whats not supposed");
 634 
 635   CodeBuffer cb(code_pos, instruction_size);
 636   MacroAssembler a(&cb);
 637 
 638   a.b(entry);
 639 
 640   ICache::invalidate_range(code_pos, instruction_size);
 641 }
 642 
 643 // MT-safe patching of a long jump instruction.
 644 void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
 645   if (NativeFarLdr::is_at(instr_addr+2*arm_insn_sz)) {
 646     assert(NativeInstruction::from(code_buffer)->is_nop(), "code_buffer image");
 647     assert(NativeImmJump::is_at(instr_addr), "instr_image image");
 648     // first 'b' prevents NativeFarLdr to recognize patching_prolog, skip it manually
 649     address load_instr = instr_addr+2*arm_insn_sz;
 650 
 651     NativeFarLdr::from(load_instr)->set_data_addr(NativeFarLdr::from(code_buffer)->data_addr());
 652 
 653     WRITE_MEM_BARRIER;
 654     *(uintptr_t*)instr_addr = *(uintptr_t*)code_buffer;
 655     ICache::invalidate_word(instr_addr);
 656 
 657     assert(NativeFarLdr::is_at(instr_addr), "now valid constant loading");
 658   } else {
 659     ShouldNotReachHere();
 660   }
 661 }