< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Print this page

   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/assembler.hpp"
  27 #include "asm/assembler.inline.hpp"
  28 #include "ci/ciEnv.hpp"

  29 #include "code/compiledIC.hpp"
  30 #include "compiler/compileTask.hpp"
  31 #include "compiler/disassembler.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "gc/shared/cardTable.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 #include "interpreter/bytecodeHistogram.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/interpreterRuntime.hpp"
  42 #include "jvm.h"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "nativeInst_aarch64.hpp"
  46 #include "oops/accessDecorators.hpp"
  47 #include "oops/compressedKlass.inline.hpp"
  48 #include "oops/compressedOops.inline.hpp"

 304     uint32_t insn2 = insn_at(insn_addr, 1);
 305     uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
 306     Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
 307     guarantee(((dest >> size) << size) == dest, "misaligned target");
 308     return 2;
 309   }
  // Patch an adrp; add sequence: write the low 12 bits of the target
  // (the page offset) into the add's immediate field (bits 21..10).
  static int adrpAdd(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;  // page offset handled by the add
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;  // two instructions consumed: adrp + add
  }
  // Patch an adrp; movk sequence: the movk supplies bits 47:32 of the
  // destination (bits 20..5 of the instruction hold the 16-bit immediate).
  static int adrpMovk(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    // Report back the address the adrp itself can express: low 32 bits of
    // the target combined with bits 47:32 taken from the patch site.
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch a 48-bit move-wide constant sequence (movz; movk; movk),
  // 16 bits per instruction, low half first.  Returns the number of
  // instructions patched.
  static int immediate(address insn_addr, address &target) {
    // First instruction must be a movz (see the "must be" encoding check).
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  // Debug-only check: re-decode the patched instruction sequence and
  // assert it now materializes exactly `target`.
  static void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      // Print context and disassemble before asserting, to aid debugging.
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }

 432     uint32_t insn2 = insn_at(insn_addr, 1);
 433     uint64_t dest = uint64_t(target);
 434     dest = (dest & 0xffff0000ffffffff) |
 435       ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
 436     target = address(dest);
 437 
 438     // We know the destination 4k page. Maybe we have a third
 439     // instruction.
 440     uint32_t insn = insn_at(insn_addr, 0);
 441     uint32_t insn3 = insn_at(insn_addr, 2);
 442     ptrdiff_t byte_offset;
 443     if (offset_for(insn, insn3, byte_offset)) {
 444       target += byte_offset;
 445       return 3;
 446     } else {
 447       return 2;
 448     }
 449   }
 450   static int immediate(address insn_addr, address &target) {
 451     uint32_t *insns = (uint32_t *)insn_addr;










 452     assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
 453     // Move wide constant: movz, movk, movk.  See movptr().
 454     assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
 455     assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
 456     target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
 457                   + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
 458                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
 459     assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
 460     assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
 461     return 3;
 462   }
  // Decoding has no side effects to check; nothing to verify.
  static void verify(address insn_addr, address &target) {
  }
 465 };
 466 
// Decode and return the target address materialized by the instruction
// sequence beginning at insn_addr.
address MacroAssembler::target_addr_for_insn(address insn_addr) {
  address target;
  RelocActions<AArch64Decoder>::run(insn_addr, target);
  return target;
}

 937   // Max stub size: alignment nop, TrampolineStub.
 938   return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
 939 }
 940 
// Emit the static call stub.  Instruction order and count are part of
// the stub's contract and must not change casually.
void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);  // placeholder; patched with the real Method* later

  // Jump to the entry point of the c2i stub.
  if (codestub_branch_needs_far_jump()) {
    // Far form: materialize the destination (patched later) and branch
    // through a register.
    movptr(rscratch1, 0);
    br(rscratch1);
  } else {
    b(pc());  // near branch; target patched later
  }
}
 956 
// Size in bytes of the stub emitted by emit_static_call_stub().
int MacroAssembler::static_call_stub_size() {
  if (!codestub_branch_needs_far_jump()) {
    // isb; movz; movk; movk; b  (mov_metadata emits movz, movk, movk)
    return 5 * NativeInstruction::instruction_size;
  }
  // isb; movz; movk; movk; movz; movk; movk; br  (far jump via movptr)
  return 8 * NativeInstruction::instruction_size;
}
 965 
// Normalize a C-style boolean in x to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);             // flags from x & 0xff
  cset(x, Assembler::NE);   // x = (low byte != 0) ? 1 : 0
}
 974 
// Emit an inline-cache call to `entry`.  rscratch2 is loaded with the
// non-oop sentinel word first (the cached-klass slot expected by the IC
// protocol).  Returns the call site address from trampoline_call.
address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}
 980 
 981 int MacroAssembler::ic_check_size() {

3289 
3290 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3291   if (decrement.is_register()) {
3292     sub(Rd, Rn, decrement.as_register());
3293   } else {
3294     sub(Rd, Rn, decrement.as_constant());
3295   }
3296 }
3297 
3298 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3299   if (decrement.is_register()) {
3300     subw(Rd, Rn, decrement.as_register());
3301   } else {
3302     subw(Rd, Rn, decrement.as_constant());
3303   }
3304 }
3305 
// Reload rheapbase with the compressed-oops base.  No-op unless
// compressed oops are in use.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      // The base is final now; materialize it as a constant.
      mov(rheapbase, CompressedOops::base());
    } else {
      // The base may still change; load it indirectly so the emitted
      // code remains correct.
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3317 
3318 // A generic CAS; success or failure is in the EQ flag.  A weak CAS
3319 // doesn't retry and may fail spuriously.  If the oldval is wanted,
3320 // Pass a register for the result, otherwise pass noreg.
3321 
3322 // Clobbers rscratch1
3323 void MacroAssembler::cmpxchg(Register addr, Register expected,
3324                              Register new_val,
3325                              enum operand_size size,
3326                              bool acquire, bool release,
3327                              bool weak,
3328                              Register result) {
3329   if (result == noreg)  result = rscratch1;

// Load the mirror of the current method's holder class into dst via the
// chain method -> ConstMethod -> ConstantPool -> pool holder -> mirror,
// then resolve the OopHandle.
// NOTE(review): the `method` parameter is unused -- the chain starts at
// rmethod directly; confirm all callers have the method in rmethod.
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}
4976 
// Compare the klass of `obj` against the klass pointer in `klass`,
// setting condition flags for a following branch.  Clobbers tmp.
void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompressedClassPointers) {
    // Load the narrow klass of obj into tmp.
    if (UseCompactObjectHeaders) {
      load_narrow_klass_compact(tmp, obj);
    } else {
      ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
      // No base: decoding is just a shift, so fold it into the compare.
      cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(klass, tmp);
      return;
    }
    // General case: fully decode the narrow klass before comparing.
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  cmp(klass, tmp);
}
5000 
5001 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5002   if (UseCompactObjectHeaders) {
5003     load_narrow_klass_compact(tmp1, obj1);
5004     load_narrow_klass_compact(tmp2,  obj2);
5005     cmpw(tmp1, tmp2);
5006   } else if (UseCompressedClassPointers) {
5007     ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5008     ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));

5227   if (dst != src) {
5228     // we can load the base into dst, subtract it from the src and shift down
5229     lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5230     ldr(dst, dst);
5231     sub(dst, src, dst);
5232     lsr(dst, dst, shift);
5233   } else {
5234     // we need an extra register in order to load the coop base
5235     Register tmp = pick_different_tmp(dst, src);
5236     RegSet regs = RegSet::of(tmp);
5237     push(regs, sp);
5238     lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5239     ldr(tmp, tmp);
5240     sub(dst, src, tmp);
5241     lsr(dst, dst, shift);
5242     pop(regs, sp);
5243   }
5244 }
5245 
5246 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5247   if (AOTCodeCache::is_on_for_dump()) {
5248     encode_klass_not_null_for_aot(dst, src);
5249     return;
5250   }
5251 
5252   switch (klass_decode_mode()) {
5253   case KlassDecodeZero:
5254     if (CompressedKlassPointers::shift() != 0) {
5255       lsr(dst, src, CompressedKlassPointers::shift());
5256     } else {
5257       if (dst != src) mov(dst, src);
5258     }
5259     break;
5260 
5261   case KlassDecodeXor:
5262     if (CompressedKlassPointers::shift() != 0) {
5263       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5264       lsr(dst, dst, CompressedKlassPointers::shift());
5265     } else {
5266       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5267     }

5293   if (dst != src) {
5294     // we can load the base into dst then add the offset with a suitable shift
5295     lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5296     ldr(dst, dst);
5297     add(dst, dst, src, LSL,  shift);
5298   } else {
5299     // we need an extra register in order to load the coop base
5300     Register tmp = pick_different_tmp(dst, src);
5301     RegSet regs = RegSet::of(tmp);
5302     push(regs, sp);
5303     lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5304     ldr(tmp, tmp);
5305     add(dst, tmp,  src, LSL,  shift);
5306     pop(regs, sp);
5307   }
5308 }
5309 
5310 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5311   assert (UseCompressedClassPointers, "should only be used for compressed headers");
5312 
5313   if (AOTCodeCache::is_on_for_dump()) {
5314     decode_klass_not_null_for_aot(dst, src);
5315     return;
5316   }
5317 
5318   switch (klass_decode_mode()) {
5319   case KlassDecodeZero:
5320     if (CompressedKlassPointers::shift() != 0) {
5321       lsl(dst, src, CompressedKlassPointers::shift());
5322     } else {
5323       if (dst != src) mov(dst, src);
5324     }
5325     break;
5326 
5327   case KlassDecodeXor:
5328     if (CompressedKlassPointers::shift() != 0) {
5329       lsl(dst, src, CompressedKlassPointers::shift());
5330       eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5331     } else {
5332       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5333     }

5590   assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
5591 
5592   InstructionMark im(this);
5593   code_section()->relocate(inst_mark(), dest.rspec());
5594   // 8143067: Ensure that the adrp can reach the dest from anywhere within
5595   // the code cache so that if it is relocated we know it will still reach
5596   if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5597     _adrp(reg1, dest.target());
5598   } else {
5599     uint64_t target = (uint64_t)dest.target();
5600     uint64_t adrp_target
5601       = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5602 
5603     _adrp(reg1, (address)adrp_target);
5604     movk(reg1, target >> 32, 32);
5605   }
5606   byte_offset = (uint64_t)dest.target() & 0xfff;
5607 }
5608 
// Load the card table's byte_map_base into reg.
void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)byte_map_base);
}
5617 














// Set up a stack frame of `framesize` bytes: save FP/LR, optionally
// establish rfp as frame pointer, and extend sp.
void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: one sub, then stp with an in-range scaled offset.
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    // Save FP/LR first (pre-indexed push), then extend the frame.
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      // Too large for a sub immediate; go through a scratch register.
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}

   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/assembler.hpp"
  27 #include "asm/assembler.inline.hpp"
  28 #include "ci/ciEnv.hpp"
  29 #include "ci/ciUtilities.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "compiler/compileTask.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "gc/shared/cardTableBarrierSet.hpp"
  37 #include "gc/shared/cardTable.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 #include "interpreter/bytecodeHistogram.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/interpreterRuntime.hpp"
  43 #include "jvm.h"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "nativeInst_aarch64.hpp"
  47 #include "oops/accessDecorators.hpp"
  48 #include "oops/compressedKlass.inline.hpp"
  49 #include "oops/compressedOops.inline.hpp"

 305     uint32_t insn2 = insn_at(insn_addr, 1);
 306     uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
 307     Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
 308     guarantee(((dest >> size) << size) == dest, "misaligned target");
 309     return 2;
 310   }
  // Patch an adrp; add sequence: the low 12 bits of the target (its page
  // offset) go into the add's immediate field, bits 21..10.
  static int adrpAdd(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;  // page offset handled by the add
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;  // adrp + add consumed
  }
  // Patch an adrp; movk sequence: the movk carries bits 47:32 of the
  // destination in its 16-bit immediate field (bits 20..5).
  static int adrpMovk(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    // Report the address the adrp itself can express: the target's low
    // 32 bits combined with bits 47:32 of the patch site.
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch a move-wide constant.  Narrow (two-instruction) sequences carry
  // a narrowKlass; wide (three-instruction) sequences carry a 48-bit
  // address.  Returns the number of instructions patched.
  static int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010101) {
      // Narrow form: re-encode target as a narrowKlass, high half in the
      // first instruction, low half in the movk.
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    // Wide form: movz; movk; movk, 16 bits each, low half first.
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  // Debug-only: re-decode the patched sequence and assert it now
  // materializes exactly `target`.
  static void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      // Print context and disassemble before asserting, to aid debugging.
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }

 443     uint32_t insn2 = insn_at(insn_addr, 1);
 444     uint64_t dest = uint64_t(target);
 445     dest = (dest & 0xffff0000ffffffff) |
 446       ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
 447     target = address(dest);
 448 
 449     // We know the destination 4k page. Maybe we have a third
 450     // instruction.
 451     uint32_t insn = insn_at(insn_addr, 0);
 452     uint32_t insn3 = insn_at(insn_addr, 2);
 453     ptrdiff_t byte_offset;
 454     if (offset_for(insn, insn3, byte_offset)) {
 455       target += byte_offset;
 456       return 3;
 457     } else {
 458       return 2;
 459     }
 460   }
 461   static int immediate(address insn_addr, address &target) {
 462     uint32_t *insns = (uint32_t *)insn_addr;
 463     // Metadata pointers are either narrow (32 bits) or wide (48 bits).
 464     // We encode narrow ones by setting the upper 16 bits in the first
 465     // instruction.
 466     if (Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010101) {
 467       assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
 468       narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(insns[0], 20, 5)) << 16)
 469                                    +  uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
 470       target = (address)CompressedKlassPointers::decode(nk);
 471       return 2;
 472     }
 473     assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
 474     // Move wide constant: movz, movk, movk.  See movptr().
 475     assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
 476     assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
 477     target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
 478                   + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
 479                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
 480     assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
 481     assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
 482     return 3;
 483   }
  // Decoding has no side effects to check; nothing to verify.
  static void verify(address insn_addr, address &target) {
  }
 486 };
 487 
// Decode and return the target address materialized by the instruction
// sequence beginning at insn_addr.
address MacroAssembler::target_addr_for_insn(address insn_addr) {
  address target;
  RelocActions<AArch64Decoder>::run(insn_addr, target);
  return target;
}

 958   // Max stub size: alignment nop, TrampolineStub.
 959   return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
 960 }
 961 
// Emit the static call stub.  Instruction order and count are part of the
// stub's contract and must not change casually.
void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);  // placeholder; patched with the real Method* later

  // Jump to the entry point of the c2i stub.
  if (codestub_branch_needs_far_jump()) {
    // Far form: materialize the destination (patched later) and branch
    // through a register.
    movptr(rscratch1, 0);
    br(rscratch1);
  } else {
    b(pc());  // near branch; target patched later
  }
}
 977 
// Upper bound on the static call stub size, assuming the far-jump form.
int MacroAssembler::max_static_call_stub_size() {
  // isb; movz; movk; movk; movz; movk; movk; br
  // (mov_metadata and movptr each emit movz, movk, movk)
  return 8 * NativeInstruction::instruction_size;
}
 982 
// Normalize a C-style boolean in x to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);             // flags from x & 0xff
  cset(x, Assembler::NE);   // x = (low byte != 0) ? 1 : 0
}
 991 
// Emit an inline-cache call to `entry`.  rscratch2 is loaded with the
// non-oop sentinel word first (the cached-klass slot expected by the IC
// protocol).  Returns the call site address from trampoline_call.
address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}
 997 
 998 int MacroAssembler::ic_check_size() {

3306 
3307 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3308   if (decrement.is_register()) {
3309     sub(Rd, Rn, decrement.as_register());
3310   } else {
3311     sub(Rd, Rn, decrement.as_constant());
3312   }
3313 }
3314 
3315 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3316   if (decrement.is_register()) {
3317     subw(Rd, Rn, decrement.as_register());
3318   } else {
3319     subw(Rd, Rn, decrement.as_constant());
3320   }
3321 }
3322 
// Reload rheapbase with the compressed-oops base.  No-op unless
// compressed oops are in use.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) {
      // The base is final and the code is not destined for the AOT cache;
      // materialize it as a constant.
      mov(rheapbase, CompressedOops::base());
    } else {
      // The base may differ when this code eventually runs (still
      // initializing, or AOT-cached); load it indirectly instead.
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3334 
3335 // A generic CAS; success or failure is in the EQ flag.  A weak CAS
3336 // doesn't retry and may fail spuriously.  If the oldval is wanted,
3337 // Pass a register for the result, otherwise pass noreg.
3338 
3339 // Clobbers rscratch1
3340 void MacroAssembler::cmpxchg(Register addr, Register expected,
3341                              Register new_val,
3342                              enum operand_size size,
3343                              bool acquire, bool release,
3344                              bool weak,
3345                              Register result) {
3346   if (result == noreg)  result = rscratch1;

// Load the mirror of the current method's holder class into dst via the
// chain method -> ConstMethod -> ConstantPool -> pool holder -> mirror,
// then resolve the OopHandle.
// NOTE(review): the `method` parameter is unused -- the chain starts at
// rmethod directly; confirm all callers have the method in rmethod.
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}
4993 
// Compare the klass of `obj` against the klass pointer in `klass`,
// setting condition flags for a following branch.  Clobbers tmp.
void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompressedClassPointers) {
    // Load the narrow klass of obj into tmp.
    if (UseCompactObjectHeaders) {
      load_narrow_klass_compact(tmp, obj);
    } else {
      ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
      // No base: decoding is just a shift, so fold it into the compare.
      cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (!AOTCodeCache::is_on_for_dump() &&
               // The 32-bit shortcut bakes in the current base, so it is
               // not valid for AOT-cached code.
               ((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(klass, tmp);
      return;
    }
    // General case: fully decode the narrow klass before comparing.
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  cmp(klass, tmp);
}
5018 
5019 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5020   if (UseCompactObjectHeaders) {
5021     load_narrow_klass_compact(tmp1, obj1);
5022     load_narrow_klass_compact(tmp2,  obj2);
5023     cmpw(tmp1, tmp2);
5024   } else if (UseCompressedClassPointers) {
5025     ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5026     ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));

5245   if (dst != src) {
5246     // we can load the base into dst, subtract it from the src and shift down
5247     lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5248     ldr(dst, dst);
5249     sub(dst, src, dst);
5250     lsr(dst, dst, shift);
5251   } else {
5252     // we need an extra register in order to load the coop base
5253     Register tmp = pick_different_tmp(dst, src);
5254     RegSet regs = RegSet::of(tmp);
5255     push(regs, sp);
5256     lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5257     ldr(tmp, tmp);
5258     sub(dst, src, tmp);
5259     lsr(dst, dst, shift);
5260     pop(regs, sp);
5261   }
5262 }
5263 
5264 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5265   if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
5266     encode_klass_not_null_for_aot(dst, src);
5267     return;
5268   }
5269 
5270   switch (klass_decode_mode()) {
5271   case KlassDecodeZero:
5272     if (CompressedKlassPointers::shift() != 0) {
5273       lsr(dst, src, CompressedKlassPointers::shift());
5274     } else {
5275       if (dst != src) mov(dst, src);
5276     }
5277     break;
5278 
5279   case KlassDecodeXor:
5280     if (CompressedKlassPointers::shift() != 0) {
5281       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5282       lsr(dst, dst, CompressedKlassPointers::shift());
5283     } else {
5284       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5285     }

5311   if (dst != src) {
5312     // we can load the base into dst then add the offset with a suitable shift
5313     lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5314     ldr(dst, dst);
5315     add(dst, dst, src, LSL,  shift);
5316   } else {
5317     // we need an extra register in order to load the coop base
5318     Register tmp = pick_different_tmp(dst, src);
5319     RegSet regs = RegSet::of(tmp);
5320     push(regs, sp);
5321     lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5322     ldr(tmp, tmp);
5323     add(dst, tmp,  src, LSL,  shift);
5324     pop(regs, sp);
5325   }
5326 }
5327 
5328 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5329   assert (UseCompressedClassPointers, "should only be used for compressed headers");
5330 
5331   if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
5332     decode_klass_not_null_for_aot(dst, src);
5333     return;
5334   }
5335 
5336   switch (klass_decode_mode()) {
5337   case KlassDecodeZero:
5338     if (CompressedKlassPointers::shift() != 0) {
5339       lsl(dst, src, CompressedKlassPointers::shift());
5340     } else {
5341       if (dst != src) mov(dst, src);
5342     }
5343     break;
5344 
5345   case KlassDecodeXor:
5346     if (CompressedKlassPointers::shift() != 0) {
5347       lsl(dst, src, CompressedKlassPointers::shift());
5348       eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5349     } else {
5350       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5351     }

5608   assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
5609 
5610   InstructionMark im(this);
5611   code_section()->relocate(inst_mark(), dest.rspec());
5612   // 8143067: Ensure that the adrp can reach the dest from anywhere within
5613   // the code cache so that if it is relocated we know it will still reach
5614   if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5615     _adrp(reg1, dest.target());
5616   } else {
5617     uint64_t target = (uint64_t)dest.target();
5618     uint64_t adrp_target
5619       = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5620 
5621     _adrp(reg1, (address)adrp_target);
5622     movk(reg1, target >> 32, 32);
5623   }
5624   byte_offset = (uint64_t)dest.target() & 0xfff;
5625 }
5626 
// Load the card table's byte_map_base into reg.
void MacroAssembler::load_byte_map_base(Register reg) {
#if INCLUDE_CDS
  if (AOTCodeCache::is_on_for_dump()) {
    // For AOT-cached code the base is not a constant: load it indirectly
    // through the AOT runtime constants area so it binds at load time.
    address byte_map_base_adr = AOTRuntimeConstants::card_table_address();
    lea(reg, ExternalAddress(byte_map_base_adr));
    ldr(reg, Address(reg));
    return;
  }
#endif
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)byte_map_base);
}
5643 
// Load the address of an AOT runtime constants field into reg.
// Only meaningful in CDS builds; `a` must lie within the constants area.
void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (AOTCodeCache::is_on_for_dump()) {
    // all aotrc field addresses should be registered in the AOTCodeCache address table
    lea(reg, ExternalAddress(a));
  } else {
    // Not dumping: the address is a plain constant.
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}
5657 
// Set up a stack frame of `framesize` bytes: save FP/LR, optionally
// establish rfp as frame pointer, and extend sp.
void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Small frame: one sub, then stp with an in-range scaled offset.
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    // Save FP/LR first (pre-indexed push), then extend the frame.
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      // Too large for a sub immediate; go through a scratch register.
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}
< prev index next >