1 /*
   2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #ifndef CPU_AARCH32_VM_NATIVEINST_AARCH32_HPP
  28 #define CPU_AARCH32_VM_NATIVEINST_AARCH32_HPP
  29 
  30 #include "asm/assembler.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "runtime/icache.hpp"
  33 #include "runtime/os.hpp"
  34 #include "utilities/top.hpp"
  35 
  36 // We have interfaces for the following instructions:
  37 // - NativeInstruction
  38 // - - NativeCall
  39 // - - NativeMovConstReg
  40 // - - NativeMovRegMem
  41 // - - NativeMovRegMemPatching
  42 // - - NativeJump
  43 // - - NativeIllegalOpCode
  44 // - - NativeGeneralJump
  45 // - - NativeReturn
  46 // - - NativeReturnX (return with argument)
  47 // - - NativePushConst
  48 // - - NativeTstRegMem
  49 
  50 // The base class for different kinds of native instruction abstractions.
  51 // Provides the primitive operations to manipulate code relative to this.
  52 
  53 class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  54   friend class Relocation;
  55   friend bool is_NativeCallTrampolineStub_at(address);
  56  public:
  57   enum { arm_insn_sz = 4 };
  58 
  59   inline bool is_nop();
  60   inline bool is_barrer();
  61   inline bool is_illegal();
  62   inline bool is_return();
  63   inline bool is_jump_or_nop();
  64   inline bool is_cond_jump();
  65   bool is_safepoint_poll();
  66   bool is_movt();
  67   bool is_orr();
  68   bool is_sigill_zombie_not_entrant();
  69 
  70   bool is_movt(Register dst, unsigned imm, Assembler::Condition cond = Assembler::C_DFLT);
  71   bool is_movw(Register dst, unsigned imm, Assembler::Condition cond = Assembler::C_DFLT);
  72   bool is_ldr(Register dst, Address addr, Assembler::Condition cond = Assembler::C_DFLT);
  73 
  74   inline bool is_jump() const;
  75   inline bool is_call() const;
  76 
  77   inline bool is_mov_const_reg() const;
  78   inline bool is_reg_call() const;
  79   inline bool is_imm_call() const;
  80   inline bool is_reg_jump() const;
  81   inline bool is_imm_jump() const;
  82 
  83  protected:
  84   address addr() const { return address(this); }
  85   // TODO remove this, every command is 4byte long
  86 #if 1
  87   address addr_at(int offset) const    { return addr() + offset; }
  88 
  89   s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  90   u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }
  91 
  92   jint int_at(int offset) const        { return *(jint*) addr_at(offset); }
  93   juint uint_at(int offset) const      { return *(juint*) addr_at(offset); }
  94 
  95   address ptr_at(int offset) const     { return *(address*) addr_at(offset); }
  96 
  97   oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }
  98 
  99 
 100   void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; }
 101   void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i; }
 102   void set_uint_at(int offset, jint  i)       { *(juint*)addr_at(offset) = i; }
 103   void set_ptr_at (int offset, address  ptr)  { *(address*) addr_at(offset) = ptr; }
 104   void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o; }
 105 #endif
 106 
 107   static juint as_uint(address addr) {
 108     return *(juint *) addr;
 109   }
 110 
 111   juint as_uint() const {
 112     return as_uint(addr());
 113   }
 114 
 115   void set_uint(juint v) {
 116     *(juint *) addr() = v;
 117   }
 118 
 119   void atomic_set_ulong_at(int offset, julong v) {
 120     address a = addr() + offset;
 121     assert(((uintptr_t) a) % 8 == 0, "should be aligned");
 122     Atomic::store(v, (volatile jlong *) a);
 123   }
 124 
 125  public:
 126 
 127   // unit test stuff
 128   static void test() {}                 // override for testing
 129 
 130   static bool is_at(address address);
 131   static NativeInstruction* from(address address);
 132 
 133 };
 134 
 135 inline NativeInstruction* nativeInstruction_at(address addr) {
 136   return NativeInstruction::from(addr);
 137 }
 138 
 139 inline NativeInstruction* nativeInstruction_at(uint32_t *addr) {
 140   return NativeInstruction::from(address(addr));
 141 }
 142 
// Common base for the single-instruction branch/call forms (see
// NativeImmCall, NativeImmJump, NativeRegCall, NativeRegJump below).
class NativeBranchType: public NativeInstruction {
 protected:
  // Does 'insn' encode one of the branch forms this class covers?
  static bool is_branch_type(uint32_t insn);
  // Re-target this branch so it transfers control to 'addr'.
  void patch_offset_to(address addr);
 public:
  enum {
    instruction_size = arm_insn_sz,   // always a single instruction
  };

  address next_instruction_address() const {
    return addr() + arm_insn_sz;
  }
};
 156 
// A load of a literal that is out of plain ldr-literal range
// (cf. NativeMovConstReg::is_far_ldr_literal_at / far_ldr_sz).
class NativeFarLdr: public NativeInstruction {
 private:
   // Step over any patching prolog that precedes the load at 'addr'.
   static address skip_patching_prolog(address addr);
 public:
   static bool is_at(address addr);
   static NativeFarLdr* from(address addr);
   // Address of the literal word this instruction loads from.
   intptr_t *data_addr();
   void set_data_addr(intptr_t *data_addr);
   address next_instruction_address() const;
};
 167 
 168 class NativeMovConstReg: public NativeInstruction {
 169   friend class Relocation;
 170   friend class NativeMovRegMem;
 171   friend class NativeGeneralJump;
 172   friend class NativeFarLdr;
 173 
 174  protected:
 175   static bool is_ldr_literal_at(address instr, Register from = r15_pc);
 176   static bool is_far_ldr_literal_at(address instr);
 177   static bool is_movw_movt_at(address instr);
 178   static bool is_mov_n_three_orr_at(address instr);
 179  public:
 180   enum {
 181     ldr_sz             = 1 * arm_insn_sz,
 182     far_ldr_sz         = 2 * arm_insn_sz,
 183     movw_movt_pair_sz  = 2 * arm_insn_sz,
 184     mov_n_three_orr_sz = 4 * arm_insn_sz,
 185     min_instruction_size = 1 * arm_insn_sz,
 186     max_instruction_size = 4 * arm_insn_sz,
 187   };
 188 
 189   address next_instruction_address() const  {
 190     if (is_ldr_literal_at(addr())) {
 191       return addr() + ldr_sz;
 192     } else if (is_far_ldr_literal_at(addr())) {
 193       return NativeFarLdr::from(addr())->next_instruction_address();;
 194     } else if (is_movw_movt_at(addr())) {
 195       return addr() + movw_movt_pair_sz;
 196     } else if (is_mov_n_three_orr_at(addr())) {
 197       return addr() + mov_n_three_orr_sz;
 198     }
 199 
 200     // Unknown instruction in NativeMovConstReg
 201     ShouldNotReachHere();
 202     return NULL;
 203   }
 204 
 205   intptr_t data() const;
 206   void set_data(intptr_t x);
 207 
 208   Register destination() const;
 209   void set_destination(Register r);
 210 
 211   void flush() {
 212     ICache::invalidate_range(addr(), max_instruction_size);
 213   }
 214 
 215   void  verify();
 216   void  print();
 217 
 218   // unit test stuff
 219   static void test() {}
 220 
 221   // Creation
 222   inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
 223 
 224   static NativeMovConstReg* before(address addr) {
 225     address mov = NULL;
 226     if (is_ldr_literal_at(addr - ldr_sz)) {
 227       mov = addr - ldr_sz;
 228     } else if (is_far_ldr_literal_at(addr - far_ldr_sz)) {
 229       mov = addr - far_ldr_sz;
 230     } else if (is_movw_movt_at(addr - movw_movt_pair_sz)) {
 231       mov = addr - movw_movt_pair_sz;
 232     } else if (is_mov_n_three_orr_at(addr - mov_n_three_orr_sz)) {
 233       mov = addr - mov_n_three_orr_sz;
 234     }
 235     guarantee(mov, "Can't find NativeMovConstReg before");
 236     return NativeMovConstReg::from(mov);
 237   }
 238 
 239   static bool is_at(address instr);
 240   static NativeMovConstReg* from(address addr);
 241 };
 242 
 243 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
 244   return NativeMovConstReg::from(address);
 245 }
 246 
// A call that reaches its final target through a trampoline stub.
class NativeTrampolineCall: public NativeInstruction {
 public:
  // NativeTrampolineCall size is always equal to NativeCall::instruction_size
  address destination() const;
  void set_destination(address dest);
  // Patch the destination while other threads may be executing this call.
  // assert_lock disables the locking assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  static bool is_at(address address);
  static NativeTrampolineCall* from(address address);

  address next_instruction_address() const;
};
 259 
// A single-instruction call whose target is held in a register.
class NativeRegCall: public NativeBranchType {
 public:

  // The register holding the call target.
  Register destination() const;
  void set_destination(Register r);

  static bool is_at(address address);
  static NativeRegCall* from(address address);
};
 269 
class NativeCall: public NativeInstruction {
  friend class Relocation;
 protected:
  // Returns the underlying jump/call instruction if 'addr' points at one of
  // the multi-instruction (long) forms; see the .cpp for details.
  NativeInstruction* is_long_jump_or_call_at(address addr);

  // NativeCall represents:
  //  NativeImmCall,
  //  NativeMovConstReg + NativeBranchType,
  //  NativeTrampolineCall
 public:
  enum {
    max_instruction_size = 5 * arm_insn_sz
  };

  // Non-const: the actual call-site size used; presumably chosen by init()
  // at startup — confirm against the .cpp.
  static int instruction_size;
#ifdef ASSERT
  // Compile-time checks that each composite call shape fits within
  // max_instruction_size.
  StaticAssert<NativeMovConstReg::movw_movt_pair_sz
      + NativeRegCall::instruction_size <= (int) max_instruction_size> dummy2;
  StaticAssert<NativeMovConstReg::mov_n_three_orr_sz
      + NativeRegCall::instruction_size <= (int) max_instruction_size> dummy3;
#endif

  address destination() const;
  void set_destination(address dest);

  static void init();
  void  verify_alignment()                       { ; }
  void  verify();
  void  print();

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const;
  address return_address() const;

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  // Similar to replace_mt_safe, but just changes the destination.  The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times.  If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  static bool is_at(address instr);
  static NativeCall* from(address instr);

  static bool is_call_before(address return_address);
};
 327 
 328 inline address NativeTrampolineCall::next_instruction_address() const {
 329   assert(is_at(addr()), "not call");
 330   return addr() + NativeCall::instruction_size;
 331 }
 332 
 333 inline NativeCall* nativeCall_at(address address) {
 334   return NativeCall::from(address);
 335 }
 336 
 337 // An interface for accessing/manipulating native moves of the form:
 338 //      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
 340 //      mov[s/z]x[w/b/q] [reg + offset], reg
 341 //      fld_s  [reg+offset]
 342 //      fld_d  [reg+offset]
 343 //      fstp_s [reg + offset]
 344 //      fstp_d [reg + offset]
 345 //      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
 346 //
 347 // Warning: These routines must be able to handle any instruction sequences
 348 // that are generated as a result of the load/store byte,word,long
 349 // macros.  For example: The load_unsigned_byte instruction generates
 350 // an xor reg,reg inst prior to generating the movb instruction.  This
 351 // class must skip the xor instruction.
 352 
 353 
 354 // TODO Review
// Accessor for native register<->memory move instructions; see the comment
// block above for the instruction shapes it must handle.
class NativeMovRegMem: public NativeInstruction {
 public:
  enum {
    instruction_size = 2 * arm_insn_sz, // TODO check this
  };
  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  // The memory-operand displacement encoded in the instruction.
  int   offset() const;

  void  set_offset(int x);

  // Bump the encoded displacement by 'add_offset' bytes.
  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
 382 
 383 inline NativeMovRegMem* nativeMovRegMem_at (address address) {
 384   NativeMovRegMem* test = (NativeMovRegMem*) address;
 385 #ifdef ASSERT
 386   test->verify();
 387 #endif
 388   return test;
 389 }
 390 
// Not implemented on AArch32: the accessor unconditionally calls
// Unimplemented().
class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {Unimplemented(); return 0;  }
};
 395 
// An unconditional jump: a constant load of the target followed by a
// register branch (see instruction_size below).
class NativeJump: public NativeInstruction {
 public:
  enum {
    // movw/movt pair materializing the target, plus the branch itself
    instruction_size = NativeMovConstReg::movw_movt_pair_sz + NativeBranchType::instruction_size,
  };
  address instruction_address() const {
    return addr();
  }

  address next_instruction_address() const;

  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);

  static bool is_at(address instr);
  static NativeJump* from(address instr);
};
 427 
 428 inline NativeJump* nativeJump_at(address addr) {
 429   return NativeJump::from(addr);
 430 }
 431 
// TODO We don't really need NativeGeneralJump, NativeJump should be able to
// do everything that General Jump would.  Make this the only interface to
// NativeJump from shared code (c1_Runtime).
class NativeGeneralJump: public NativeJump {
public:
  enum {
    instruction_size = arm_insn_sz,
  };

  // Place an unconditional jump to 'entry' at 'code_pos'.
  static void insert_unconditional(address code_pos, address entry);
  // MT-safe replacement of the instruction at 'instr_addr' with the code
  // prepared in 'code_buffer'.
  static void replace_mt_safe(address instr_addr, address code_buffer);
  static void verify();
};
 445 
 446 inline NativeGeneralJump* nativeGeneralJump_at(address address) {
 447   NativeGeneralJump* jump = (NativeGeneralJump*)(address);
 448   debug_only(jump->verify();)
 449   return jump;
 450 }
 451 
// Helper for emitting a pop-into-register instruction.
class NativePopReg : public NativeInstruction {
 public:
  // Insert a pop instruction at 'code_pos' popping into 'reg'
  static void insert(address code_pos, Register reg);
};
 457 
 458 
// Helper for emitting an illegal (trapping) instruction.
class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert illegal opcode at the specified address
  static void insert(address code_pos);
};
 464 
// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
};
 469 
// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
};
 474 
// Simple test vs memory (marker class only; no members)
class NativeTstRegMem: public NativeInstruction {
 public:
};
 479 
 480 inline bool NativeInstruction::is_nop()         {
 481   return (as_uint() & 0x0fffffff) == 0x0320f000;
 482 }
 483 
 484 inline bool NativeInstruction::is_barrer()         {
 485   return (as_uint() == 0xf57ff05b /* dmb ish */ ||
 486             as_uint() == 0xee070fba /* mcr 15, 0, r0, cr7, cr10, {5}) */);
 487 }
 488 
 489 inline bool NativeInstruction::is_jump_or_nop() {
 490   return is_nop() || is_jump();
 491 }
 492 
// A single-instruction call whose target is encoded as an immediate
// (pc-relative) offset in the instruction itself.
class NativeImmCall: public NativeBranchType {
 public:
  address destination() const;
  void set_destination(address dest);

  static bool is_at(address address);
  static NativeImmCall* from(address address);
};
 501 
// A single-instruction jump whose target is encoded as an immediate
// (pc-relative) offset in the instruction itself.
class NativeImmJump: public NativeBranchType {
 public:

  address destination() const;
  void set_destination(address r);

  static bool is_at(address address);
  static NativeImmJump* from(address address);
};
 511 
// A single-instruction jump whose target is held in a register.
class NativeRegJump: public NativeBranchType {
 public:

  // The register holding the jump target.
  Register destination() const;
  void set_destination(Register r);

  static bool is_at(address address);
  static NativeRegJump* from(address address);
};
 521 
// Definitions of the classification predicates declared in
// NativeInstruction: each delegates to the corresponding class's is_at().
inline bool NativeInstruction::is_call() const          { return NativeCall::is_at(addr()); }
inline bool NativeInstruction::is_jump() const          { return NativeJump::is_at(addr()); }
inline bool NativeInstruction::is_mov_const_reg() const { return NativeMovConstReg::is_at(addr()); }
inline bool NativeInstruction::is_imm_call() const      { return NativeImmCall::is_at(addr()); }
inline bool NativeInstruction::is_reg_call() const      { return NativeRegCall::is_at(addr()); }
inline bool NativeInstruction::is_imm_jump() const      { return NativeImmJump::is_at(addr()); }
inline bool NativeInstruction::is_reg_jump() const      { return NativeRegJump::is_at(addr()); }
 529 
// Return the NativeCall whose return address is 'return_address'. The three
// recognized call shapes are tried in order: a trampoline call, a
// constant-load followed by a register call through the same register, and
// an immediate call. Fails fatally if no shape matches.
inline NativeCall* nativeCall_before(address return_address) {
  // Trampoline call: occupies NativeCall::instruction_size bytes.
  if (NativeTrampolineCall::is_at(return_address - NativeCall::instruction_size)) {
    return NativeCall::from(return_address - NativeCall::instruction_size);
  }
  // NativeMovConstReg + NativeRegCall: only a match if the register the
  // constant was loaded into is the one the call branches through.
  if (NativeMovConstReg::is_at(return_address - NativeCall::instruction_size)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(return_address - NativeCall::instruction_size);
    address next_instr = nm->next_instruction_address();
    if (NativeRegCall::is_at(next_instr) &&
            NativeRegCall::from(next_instr)->destination() == nm->destination()) {
      return NativeCall::from(return_address - NativeCall::instruction_size);
    }
  }
  // Immediate call: a single branch instruction.
  if (NativeImmCall::is_at(return_address - NativeBranchType::instruction_size)) {
    return NativeCall::from(return_address - NativeBranchType::instruction_size);
  }

  ShouldNotReachHere();
  return NULL;
}
 549 
 550 #endif // CPU_AARCH32_VM_NATIVEINST_AARCH32_HPP