< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Print this page

  23  *
  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "ci/ciEnv.hpp"
  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "gc/shared/cardTable.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 #include "interpreter/bytecodeHistogram.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "compiler/compileTask.hpp"
  42 #include "compiler/disassembler.hpp"

  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "nativeInst_aarch64.hpp"
  46 #include "oops/accessDecorators.hpp"

  47 #include "oops/compressedOops.inline.hpp"
  48 #include "oops/klass.inline.hpp"
  49 #include "runtime/icache.hpp"
  50 #include "runtime/interfaceSupport.inline.hpp"
  51 #include "runtime/jniHandles.inline.hpp"
  52 #include "runtime/sharedRuntime.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/thread.hpp"
  55 #include "utilities/powerOfTwo.hpp"
  56 #ifdef COMPILER1
  57 #include "c1/c1_LIRAssembler.hpp"
  58 #endif
  59 #ifdef COMPILER2
  60 #include "oops/oop.hpp"
  61 #include "opto/compile.hpp"
  62 #include "opto/node.hpp"
  63 #include "opto/output.hpp"
  64 #endif
  65 
  66 #ifdef PRODUCT

3693   adrp(rscratch1, src2, offset);
3694   ldr(rscratch1, Address(rscratch1, offset));
3695   cmp(src1, rscratch1);
3696 }
3697 
// Compare two object registers and set the condition flags (plain 64-bit cmp;
// no decompression is performed here).
3698 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3699   cmp(obj1, obj2);
3700 }
3701 
// Load the ClassLoaderData of rmethod's holder InstanceKlass into rresult.
// Clobbers rresult only.
3702 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
3703   load_method_holder(rresult, rmethod);
3704   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
3705 }
3706 
// Walk Method* -> ConstMethod* -> ConstantPool* -> pool holder to load the
// InstanceKlass* that declares 'method' into 'holder'.
3707 void MacroAssembler::load_method_holder(Register holder, Register method) {
3708   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
3709   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
3710   ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
3711 }
3712 
// Load the Klass* of object 'src' into 'dst'.
// With compressed class pointers the 32-bit narrow klass field is loaded and
// decoded; otherwise the full 64-bit Klass* is loaded directly.
3713 void MacroAssembler::load_klass(Register dst, Register src) {
3714   if (UseCompressedClassPointers) {
3715     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3716     decode_klass_not_null(dst);
3717   } else {
3718     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));



























3719   }







3720 }
3721 
3722 // ((OopHandle)result).resolve();
// On entry 'result' holds the OopHandle; on exit it holds the resolved oop.
// 'tmp' is a scratch register handed to access_load_at.
3723 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3724   // OopHandle::resolve is an indirection.
3725   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3726 }
3727 
3728 // ((WeakHandle)result).resolve();
// On entry rresult holds the WeakHandle; on exit it holds the resolved oop,
// or null if the handle was null or the referent was cleared.
// rtmp is a scratch register handed to the load barrier.
3729 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
3730   assert_different_registers(rresult, rtmp);
3731   Label resolved;
3732 
3733   // A null weak handle resolves to null.
3734   cbz(rresult, resolved);
3735 
3736   // Only 64 bit platforms support GCs that require a tmp register
3737   // Only IN_HEAP loads require a thread_tmp register
3738   // WeakHandle::resolve is an indirection like jweak.
3739   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3740                  rresult, Address(rresult), rtmp, /*tmp_thread*/noreg);
3741   bind(resolved);
3742 }
3743 
3744 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3745   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3746   ldr(dst, Address(rmethod, Method::const_offset()));
3747   ldr(dst, Address(dst, ConstMethod::constants_offset()));
3748   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
3749   ldr(dst, Address(dst, mirror_offset));
3750   resolve_oop_handle(dst, tmp);
3751 }
3752 
// Compare trial_klass against the klass of object 'oop', setting the
// condition flags. 'tmp' is clobbered. When compressed class pointers are in
// use, fast paths compare on the narrow form to avoid a full decode:
//   - base == NULL: compare trial_klass with tmp shifted left by the shift
//   - 32-bit-aligned base and no shift: only the low 32 bits can differ
3753 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3754   if (UseCompressedClassPointers) {
3755     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3756     if (CompressedKlassPointers::base() == NULL) {
3757       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3758       return;
3759     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3760                && CompressedKlassPointers::shift() == 0) {
3761       // Only the bottom 32 bits matter
3762       cmpw(trial_klass, tmp);
3763       return;
3764     }
3765     decode_klass_not_null(tmp);
3766   } else {
3767     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3768   }

3769   cmp(trial_klass, tmp);
3770 }
3771 
// Store the Klass* in 'src' into the klass field of object 'dst'.
// NOTE: with compressed class pointers, 'src' is encoded in place and is
// therefore clobbered by this call.
3772 void MacroAssembler::store_klass(Register dst, Register src) {
3773   // FIXME: Should this be a store release?  concurrent gcs assumes
3774   // klass length is valid if klass field is not null.
3775   if (UseCompressedClassPointers) {
3776     encode_klass_not_null(src);
3777     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3778   } else {
3779     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3780   }
3781 }
3782 
// Write 'src' into the 32-bit klass gap of object 'dst' (callers presumably
// pass zr to zero it — confirm at call sites). No-op when class pointers are
// uncompressed, since then there is no gap.
3783 void MacroAssembler::store_klass_gap(Register dst, Register src) {
3784   if (UseCompressedClassPointers) {
3785     // Store to klass gap in destination
3786     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
3787   }
3788 }
3789 
3790 // Algorithm must match CompressedOops::encode.
3791 void MacroAssembler::encode_heap_oop(Register d, Register s) {
3792 #ifdef ASSERT
3793   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
3794 #endif
3795   verify_oop_msg(s, "broken oop in encode_heap_oop");
3796   if (CompressedOops::base() == NULL) {
3797     if (CompressedOops::shift() != 0) {
3798       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3799       lsr(d, s, LogMinObjAlignmentInBytes);
3800     } else {
3801       mov(d, s);
3802     }
3803   } else {
3804     subs(d, s, rheapbase);
3805     csel(d, d, zr, Assembler::HS);
3806     lsr(d, d, LogMinObjAlignmentInBytes);
3807 
3808     /*  Old algorithm: is this any worse?
3809     Label nonnull;

3904   // Cannot assert, unverified entry point counts instructions (see .ad file)
3905   // vtableStubs also counts instructions in pd_code_size_limit.
3906   // Also do not verify_oop as this is called by verify_oop.
3907   if (CompressedOops::shift() != 0) {
3908     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3909     if (CompressedOops::base() != NULL) {
3910       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
3911     } else {
3912       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
3913     }
3914   } else {
3915     assert (CompressedOops::base() == NULL, "sanity");
3916     if (dst != src) {
3917       mov(dst, src);
3918     }
3919   }
3920 }
3921 
3922 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
3923 














// Lazily select and cache (in _klass_decode_mode) how narrow Klass pointers
// are decoded on this VM:
//   KlassDecodeZero - base == NULL: decode is a plain shift (or move).
//   KlassDecodeXor  - base is a valid logical immediate and aligned to the
//                     encoding range: decode is shift + eor.
//   KlassDecodeMovk - the shifted base only occupies bits 32..47, so it can
//                     be materialized with a single movk.
// guarantee()s (VM abort) if the base fits none of these patterns.
3924 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
3925   assert(UseCompressedClassPointers, "not using compressed class pointers");
3926   assert(Metaspace::initialized(), "metaspace not initialized yet");


3927
3928   if (_klass_decode_mode != KlassDecodeNone) {
3929     return _klass_decode_mode;



3930   }


3931
3932   assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
3933          || 0 == CompressedKlassPointers::shift(), "decode alg wrong");


3934
3935   if (CompressedKlassPointers::base() == NULL) {
3936     return (_klass_decode_mode = KlassDecodeZero);


3937   }
3938 
3939   if (operand_valid_for_logical_immediate(
3940         /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
3941     const uint64_t range_mask =
3942       (1ULL << log2i(CompressedKlassPointers::range())) - 1;
3943     if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
3944       return (_klass_decode_mode = KlassDecodeXor);
3945     }
3946   }
3947 
3948   const uint64_t shifted_base =
3949     (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
3950   guarantee((shifted_base & 0xffff0000ffffffff) == 0,
3951             "compressed class base bad alignment");
3952 
3953   return (_klass_decode_mode = KlassDecodeMovk);
3954 }
3955 
// Encode the non-null Klass* in 'src' into its narrow form in 'dst'
// (inverse of decode_klass_not_null). The strategy is chosen once by
// klass_decode_mode(); see that function for the meaning of each mode.
3956 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {


3957   switch (klass_decode_mode()) {
3958   case KlassDecodeZero:
3959     if (CompressedKlassPointers::shift() != 0) {
3960       lsr(dst, src, LogKlassAlignmentInBytes);
3961     } else {
3962       if (dst != src) mov(dst, src);
3963     }
3964     break;
3965
3966   case KlassDecodeXor:
3967     if (CompressedKlassPointers::shift() != 0) {
3968       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
3969       lsr(dst, dst, LogKlassAlignmentInBytes);
3970     } else {
3971       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
3972     }
3973     break;
3974
3975   case KlassDecodeMovk:
// Movk mode: the base only affects bits 32..47, so encoding just extracts
// the low 32 bits (after undoing the shift).
3976     if (CompressedKlassPointers::shift() != 0) {
3977       ubfx(dst, src, LogKlassAlignmentInBytes, 32);
3978     } else {
3979       movw(dst, src);
3980     }
3981     break;
3982
3983   case KlassDecodeNone:
3984     ShouldNotReachHere();
3985     break;
3986   }
3987 }
3988 
// In-place variant: encode register r into itself.
3989 void MacroAssembler::encode_klass_not_null(Register r) {
3990   encode_klass_not_null(r, r);
3991 }
3992 
// Expand the narrow klass in 'src' into the full Klass* in 'dst'
// (inverse of encode_klass_not_null). Strategy per klass_decode_mode().
3993 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3994   assert (UseCompressedClassPointers, "should only be used for compressed headers");
3995


3996   switch (klass_decode_mode()) {
3997   case KlassDecodeZero:
3998     if (CompressedKlassPointers::shift() != 0) {
3999       lsl(dst, src, LogKlassAlignmentInBytes);
4000     } else {
4001       if (dst != src) mov(dst, src);
4002     }
4003     break;
4004
4005   case KlassDecodeXor:
4006     if (CompressedKlassPointers::shift() != 0) {
4007       lsl(dst, src, LogKlassAlignmentInBytes);
4008       eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
4009     } else {
4010       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4011     }
4012     break;
4013
4014   case KlassDecodeMovk: {
4015     const uint64_t shifted_base =
4016       (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4017
// Materialize the base (which lives only in bits 32..47 of the shifted
// form, see klass_decode_mode()) with a single movk, then undo the shift.



4018     if (dst != src) movw(dst, src);
4019     movk(dst, shifted_base >> 32, 32);
4020
4021     if (CompressedKlassPointers::shift() != 0) {
4022       lsl(dst, dst, LogKlassAlignmentInBytes);
4023     }
4024
4025     break;
4026   }
4027
4028   case KlassDecodeNone:
4029     ShouldNotReachHere();
4030     break;
4031   }
4032 }
4033 
// In-place variant: decode register r into itself.
4034 void  MacroAssembler::decode_klass_not_null(Register r) {
4035   decode_klass_not_null(r, r);
4036 }
4037 
4038 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
4039 #ifdef ASSERT
4040   {
4041     ThreadInVMfromUnknown tiv;
4042     assert (UseCompressedOops, "should only be used for compressed oops");
4043     assert (Universe::heap() != NULL, "java heap should be initialized");
4044     assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");

  23  *
  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "ci/ciEnv.hpp"
  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "gc/shared/cardTable.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 #include "interpreter/bytecodeHistogram.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "compiler/compileTask.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "logging/log.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "nativeInst_aarch64.hpp"
  47 #include "oops/accessDecorators.hpp"
  48 #include "oops/compressedKlass.inline.hpp"
  49 #include "oops/compressedOops.inline.hpp"
  50 #include "oops/klass.inline.hpp"
  51 #include "runtime/icache.hpp"
  52 #include "runtime/interfaceSupport.inline.hpp"
  53 #include "runtime/jniHandles.inline.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/stubRoutines.hpp"
  56 #include "runtime/thread.hpp"
  57 #include "utilities/powerOfTwo.hpp"
  58 #ifdef COMPILER1
  59 #include "c1/c1_LIRAssembler.hpp"
  60 #endif
  61 #ifdef COMPILER2
  62 #include "oops/oop.hpp"
  63 #include "opto/compile.hpp"
  64 #include "opto/node.hpp"
  65 #include "opto/output.hpp"
  66 #endif
  67 
  68 #ifdef PRODUCT

3695   adrp(rscratch1, src2, offset);
3696   ldr(rscratch1, Address(rscratch1, offset));
3697   cmp(src1, rscratch1);
3698 }
3699 
// Compare two object registers and set the condition flags (plain 64-bit cmp;
// no decompression is performed here).
3700 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3701   cmp(obj1, obj2);
3702 }
3703 
// Load the ClassLoaderData of rmethod's holder InstanceKlass into rresult.
// Clobbers rresult only.
3704 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
3705   load_method_holder(rresult, rmethod);
3706   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
3707 }
3708 
// Walk Method* -> ConstMethod* -> ConstantPool* -> pool holder to load the
// InstanceKlass* that declares 'method' into 'holder'.
3709 void MacroAssembler::load_method_holder(Register holder, Register method) {
3710   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
3711   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
3712   ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
3713 }
3714 
3715 // Loads the obj's narrow Klass (nKlass) into dst from the mark word.
3716 // src and dst must be distinct registers
3717 // Preserves all registers (incl src, rscratch1 and rscratch2), but clobbers condition flags
3718 void MacroAssembler::load_nklass(Register dst, Register src) {
3719   assert(UseCompressedClassPointers, "expects UseCompressedClassPointers");
3720
3721   assert_different_registers(src, dst);
3722
3723   Label slow, done;
3724
3725   // Check if we can take the (common) fast path, if obj is unlocked.
3726   ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
// xor with unlocked_value makes the lock bits zero iff the object is
// unlocked; any non-zero lock bits mean the mark word may be displaced.
3727   eor(dst, dst, markWord::unlocked_value);
3728   tst(dst, markWord::lock_mask_in_place);
3729   br(Assembler::NE, slow);
3730
3731   // Fast-path: shift and decode Klass*.
3732   lsr(dst, dst, markWord::klass_shift);
3733   b(done);
3734
// Slow path: mark word is not in place (locked/displaced); call the runtime
// stub to fetch the narrow klass. r0 is both argument and return register,
// so preserve it (and lr) around the call when dst is not already r0.
3735   bind(slow);
3736   RegSet saved_regs = RegSet::of(lr);
3737   // We need r0 as argument and return register for the call. Preserve it, if necessary.
3738   if (dst != r0) {
3739     saved_regs += RegSet::of(r0);
3740   }
3741   push(saved_regs, sp);
3742   mov(r0, src);
3743   assert(StubRoutines::load_nklass() != NULL, "Must have stub");
3744   far_call(RuntimeAddress(StubRoutines::load_nklass()));
3745   if (dst != r0) {
3746     mov(dst, r0);
3747   }
3748   pop(saved_regs, sp);
3749   bind(done);
3750 }
3751 
// Load the full Klass* of object 'src' into 'dst': fetch the narrow klass
// from the mark word (load_nklass) and decode it. src and dst must be
// distinct; condition flags are clobbered (see load_nklass).
3752 void MacroAssembler::load_klass(Register dst, Register src) {
3753   load_nklass(dst, src);
3754   decode_klass_not_null(dst);
3755 }
3756 
3757 // ((OopHandle)result).resolve();
// On entry 'result' holds the OopHandle; on exit it holds the resolved oop.
// 'tmp' is a scratch register handed to access_load_at.
3758 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3759   // OopHandle::resolve is an indirection.
3760   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3761 }
3762 
3763 // ((WeakHandle)result).resolve();
// On entry rresult holds the WeakHandle; on exit it holds the resolved oop,
// or null if the handle was null or the referent was cleared.
// rtmp is a scratch register handed to the load barrier.
3764 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
3765   assert_different_registers(rresult, rtmp);
3766   Label resolved;
3767 
3768   // A null weak handle resolves to null.
3769   cbz(rresult, resolved);
3770 
3771   // Only 64 bit platforms support GCs that require a tmp register
3772   // Only IN_HEAP loads require a thread_tmp register
3773   // WeakHandle::resolve is an indirection like jweak.
3774   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3775                  rresult, Address(rresult), rtmp, /*tmp_thread*/noreg);
3776   bind(resolved);
3777 }
3778 
3779 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3780   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3781   ldr(dst, Address(rmethod, Method::const_offset()));
3782   ldr(dst, Address(dst, ConstMethod::constants_offset()));
3783   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
3784   ldr(dst, Address(dst, mirror_offset));
3785   resolve_oop_handle(dst, tmp);
3786 }
3787 
// Compare trial_klass against the klass of object 'oop' (narrow klass
// fetched from the mark word via load_nklass), setting the condition flags.
// 'tmp' is clobbered. Fast paths compare on the narrow form to avoid a full
// decode when the encoding base/shift allow it.
3788 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3789   assert(UseCompressedClassPointers, "Lilliput");
3790   load_nklass(tmp, oop);
3791   if (CompressedKlassPointers::base() == NULL) {
3792     cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3793     return;
3794   } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3795              && CompressedKlassPointers::shift() == 0) {
3796     // Only the bottom 32 bits matter
3797     cmpw(trial_klass, tmp);
3798     return;




3799   }
3800   decode_klass_not_null(tmp);
3801   cmp(trial_klass, tmp);
3802 }
3803 


















3804 // Algorithm must match CompressedOops::encode.
3805 void MacroAssembler::encode_heap_oop(Register d, Register s) {
3806 #ifdef ASSERT
3807   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
3808 #endif
3809   verify_oop_msg(s, "broken oop in encode_heap_oop");
3810   if (CompressedOops::base() == NULL) {
3811     if (CompressedOops::shift() != 0) {
3812       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3813       lsr(d, s, LogMinObjAlignmentInBytes);
3814     } else {
3815       mov(d, s);
3816     }
3817   } else {
3818     subs(d, s, rheapbase);
3819     csel(d, d, zr, Assembler::HS);
3820     lsr(d, d, LogMinObjAlignmentInBytes);
3821 
3822     /*  Old algorithm: is this any worse?
3823     Label nonnull;

3918   // Cannot assert, unverified entry point counts instructions (see .ad file)
3919   // vtableStubs also counts instructions in pd_code_size_limit.
3920   // Also do not verify_oop as this is called by verify_oop.
3921   if (CompressedOops::shift() != 0) {
3922     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3923     if (CompressedOops::base() != NULL) {
3924       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
3925     } else {
3926       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
3927     }
3928   } else {
3929     assert (CompressedOops::base() == NULL, "sanity");
3930     if (dst != src) {
3931       mov(dst, src);
3932     }
3933   }
3934 }
3935 
3936 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
3937 
3938 // Returns a static string
3939 const char* MacroAssembler::describe_klass_decode_mode(MacroAssembler::KlassDecodeMode mode) {
3940   switch (mode) {
3941   case KlassDecodeNone: return "none";
3942   case KlassDecodeZero: return "zero";
3943   case KlassDecodeXor:  return "xor";
3944   case KlassDecodeMovk: return "movk";
3945   default:
3946     ShouldNotReachHere();
3947   }
3948   return NULL;
3949 }
3950 
3951 // Return the current narrow Klass pointer decode mode.
// Lazily computed on first call from the compressed-klass base and cached in
// _klass_decode_mode; guarantee()s (VM abort) if the base is not encodable.
// NOTE(review): the lazy initialization is not synchronized — presumably the
// first call happens before concurrent compilation threads run; confirm.
3952 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
3953   if (_klass_decode_mode == KlassDecodeNone) {
3954     // First time initialization
3955     assert(UseCompressedClassPointers, "not using compressed class pointers");
3956     assert(Metaspace::initialized(), "metaspace not initialized yet");
3957 
3958     _klass_decode_mode = klass_decode_mode_for_base(CompressedKlassPointers::base());
3959     guarantee(_klass_decode_mode != KlassDecodeNone,
3960               PTR_FORMAT " is not a valid encoding base on aarch64",
3961               p2i(CompressedKlassPointers::base()));
3962     log_info(metaspace)("klass decode mode initialized: %s", describe_klass_decode_mode(_klass_decode_mode));
3963   }
3964   return _klass_decode_mode;
3965 }
3966 
3967 // Given an arbitrary base address, return the KlassDecodeMode that would be used. Return KlassDecodeNone
3968 // if base address is not valid for encoding.
3969 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode_for_base(address base) {
3970   assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
3971 
3972   const uint64_t base_u64 = (uint64_t) base;
3973 
3974   if (base_u64 == 0) {
3975     return KlassDecodeZero;
3976   }
3977 
3978   if (operand_valid_for_logical_immediate(false, base_u64) &&
3979       ((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0)) {
3980     return KlassDecodeXor;




3981   }
3982 
3983   const uint64_t shifted_base = base_u64 >> CompressedKlassPointers::shift();
3984   if ((shifted_base & 0xffff0000ffffffff) == 0) {
3985     return KlassDecodeMovk;
3986   }
3987 
3988   return KlassDecodeNone;
3989 }
3990 
// Encode the non-null Klass* in 'src' into its narrow form in 'dst'
// (inverse of decode_klass_not_null). The strategy is chosen once by
// klass_decode_mode(); the shift is always non-zero here (Lilliput).
3991 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3992   assert (UseCompressedClassPointers, "should only be used for compressed headers");
3993   assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
3994   switch (klass_decode_mode()) {
3995   case KlassDecodeZero:
3996     lsr(dst, src, LogKlassAlignmentInBytes);




3997     break;
3998
3999   case KlassDecodeXor:
4000     eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4001     lsr(dst, dst, LogKlassAlignmentInBytes);




4002     break;
4003
4004   case KlassDecodeMovk:
// Movk mode: the base only affects bits 32..47 of the shifted form, so
// encoding just extracts the narrow-klass-sized low field after the shift.
4005     ubfx(dst, src, LogKlassAlignmentInBytes, MaxNarrowKlassPointerBits);




4006     break;
4007
4008   case KlassDecodeNone:
4009     ShouldNotReachHere();
4010     break;
4011   }
4012 }
4013 
// In-place variant: encode register r into itself.
4014 void MacroAssembler::encode_klass_not_null(Register r) {
4015   encode_klass_not_null(r, r);
4016 }
4017 
4018 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4019   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4020 
4021   assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
4022 
4023   switch (klass_decode_mode()) {
4024   case KlassDecodeZero:
4025     if (dst != src) mov(dst, src);




4026     break;
4027 
4028   case KlassDecodeXor:
4029     lsl(dst, src, LogKlassAlignmentInBytes);
4030     eor(dst, dst, (uint64_t)CompressedKlassPointers::base());




4031     break;
4032 
4033   case KlassDecodeMovk: {
4034     const uint64_t shifted_base =
4035       (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4036 
4037     // Invalid base should have been gracefully handled via klass_decode_mode() in VM initialization.
4038     assert((shifted_base & 0xffff0000ffffffff) == 0, "incompatible base");
4039 
4040     if (dst != src) movw(dst, src);
4041     movk(dst, shifted_base >> 32, 32);
4042     lsl(dst, dst, LogKlassAlignmentInBytes);




4043     break;
4044   }
4045 
4046   case KlassDecodeNone:
4047     ShouldNotReachHere();
4048     break;
4049   }
4050 }
4051 
// In-place variant: decode register r into itself.
4052 void  MacroAssembler::decode_klass_not_null(Register r) {
4053   decode_klass_not_null(r, r);
4054 }
4055 
4056 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
4057 #ifdef ASSERT
4058   {
4059     ThreadInVMfromUnknown tiv;
4060     assert (UseCompressedOops, "should only be used for compressed oops");
4061     assert (Universe::heap() != NULL, "java heap should be initialized");
4062     assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
< prev index next >