< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Print this page
*** 27,10 ***
--- 27,12 ---
  
  #include "precompiled.hpp"
  #include "asm/assembler.hpp"
  #include "asm/assembler.inline.hpp"
  #include "ci/ciEnv.hpp"
+ #include "ci/ciUtilities.hpp"
+ #include "code/SCCache.hpp"
  #include "compiler/compileTask.hpp"
  #include "compiler/disassembler.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/barrierSetAssembler.hpp"

*** 344,10 ***
--- 346,20 ---
      dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
      target = address(dest);
      return 2;
    }
    virtual int immediate(address insn_addr, address &target) {
+     // Metadata pointers are either narrow (32 bits) or wide (48 bits).
+     // We encode narrow ones by setting the upper 16 bits in the first
+     // instruction.
+     if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
+       assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
+       narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
+       Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
+       Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
+       return 2;
+     }
      assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
      uint64_t dest = (uint64_t)target;
      // Move wide constant
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");

*** 474,10 ***
--- 486,20 ---
        return 2;
      }
    }
    virtual int immediate(address insn_addr, address &target) {
      uint32_t *insns = (uint32_t *)insn_addr;
+     // Metadata pointers are either narrow (32 bits) or wide (48 bits).
+     // We encode narrow ones by setting the upper 16 bits in the first
+     // instruction.
+     if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
+       assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
+       narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
+                                    +  uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
+       target = (address)CompressedKlassPointers::decode(nk);
+       return 2;
+     }
      assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
      // Move wide constant: movz, movk, movk.  See movptr().
      assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
      assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
      target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))

*** 671,10 ***
--- 693,13 ---
      set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
    }
  }
  
  static inline bool target_needs_far_branch(address addr) {
+   if (SCCache::is_on_for_write()) {
+     return true;
+   }
    // codecache size <= 128M
    if (!MacroAssembler::far_branches()) {
      return false;
    }
    // codecache size > 240M

*** 1505,11 ***
    if (super_klass != r0) {
      mov(r0, super_klass);
    }
  
  #ifndef PRODUCT
!   mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
    Address pst_counter_addr(rscratch2);
    ldr(rscratch1, pst_counter_addr);
    add(rscratch1, rscratch1, 1);
    str(rscratch1, pst_counter_addr);
  #endif //PRODUCT
--- 1530,16 ---
    if (super_klass != r0) {
      mov(r0, super_klass);
    }
  
  #ifndef PRODUCT
!   if (SCCache::is_on_for_write()) {
+       // SCA needs relocation info for this counter's address
+     lea(rscratch2, ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
+   } else {
+     mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
+   }
    Address pst_counter_addr(rscratch2);
    ldr(rscratch1, pst_counter_addr);
    add(rscratch1, rscratch1, 1);
    str(rscratch1, pst_counter_addr);
  #endif //PRODUCT

*** 2727,11 ***
  }
  
  void MacroAssembler::reinit_heapbase()
  {
    if (UseCompressedOops) {
!     if (Universe::is_fully_initialized()) {
        mov(rheapbase, CompressedOops::ptrs_base());
      } else {
        lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
        ldr(rheapbase, Address(rheapbase));
      }
--- 2757,11 ---
  }
  
  void MacroAssembler::reinit_heapbase()
  {
    if (UseCompressedOops) {
!     if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) {
        mov(rheapbase, CompressedOops::ptrs_base());
      } else {
        lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
        ldr(rheapbase, Address(rheapbase));
      }

*** 5030,11 ***
    CardTable::CardValue* byte_map_base =
      ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
  
    // Strictly speaking the byte_map_base isn't an address at all, and it might
    // even be negative. It is thus materialised as a constant.
!   mov(reg, (uint64_t)byte_map_base);
  }
  
  void MacroAssembler::build_frame(int framesize) {
    assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
    assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
--- 5060,16 ---
    CardTable::CardValue* byte_map_base =
      ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
  
    // Strictly speaking the byte_map_base isn't an address at all, and it might
    // even be negative. It is thus materialised as a constant.
!   if (SCCache::is_on_for_write()) {
+     // SCA needs relocation info for card table base
+     lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
+   } else {
+     mov(reg, (uint64_t)byte_map_base);
+   }
  }
  
  void MacroAssembler::build_frame(int framesize) {
    assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
    assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
< prev index next >