src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "ci/ciEnv.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "gc/shared/cardTableBarrierSet.hpp"
  37 #include "gc/shared/cardTable.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 #include "interpreter/bytecodeHistogram.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "compiler/compileTask.hpp"
  43 #include "compiler/disassembler.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "nativeInst_aarch64.hpp"
  47 #include "oops/accessDecorators.hpp"
  48 #include "oops/compressedOops.inline.hpp"
  49 #include "oops/klass.inline.hpp"
  50 #include "runtime/continuation.hpp"
  51 #include "runtime/icache.hpp"
  52 #include "runtime/interfaceSupport.inline.hpp"
  53 #include "runtime/javaThread.hpp"
  54 #include "runtime/jniHandles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "utilities/powerOfTwo.hpp"
  58 #ifdef COMPILER1
  59 #include "c1/c1_LIRAssembler.hpp"
  60 #endif
  61 #ifdef COMPILER2
  62 #include "oops/oop.hpp"
  63 #include "opto/compile.hpp"
  64 #include "opto/node.hpp"
  65 #include "opto/output.hpp"
  66 #endif
  67 

4024   adrp(rscratch1, src2, offset);
4025   ldr(rscratch1, Address(rscratch1, offset));
4026   cmp(src1, rscratch1);
4027 }
4028 
4029 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4030   cmp(obj1, obj2);
4031 }
4032 
4033 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4034   load_method_holder(rresult, rmethod);
4035   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4036 }
4037 
4038 void MacroAssembler::load_method_holder(Register holder, Register method) {
4039   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
4040   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
4041   ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
4042 }
4043 
4044 void MacroAssembler::load_klass(Register dst, Register src) {
4045   if (UseCompressedClassPointers) {
4046     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4047     decode_klass_not_null(dst);
4048   } else {
4049     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));

4050   }
4051 }
4052 
4053 // ((OopHandle)result).resolve();
4054 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
4055   // OopHandle::resolve is an indirection.
4056   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
4057 }
4058 
4059 // ((WeakHandle)result).resolve();
4060 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
4061   assert_different_registers(result, tmp1, tmp2);
4062   Label resolved;
4063 
4064   // A null weak handle resolves to null.
4065   cbz(result, resolved);
4066 
4067   // Only 64-bit platforms support GCs that require a tmp register.
4068   // WeakHandle::resolve is an indirection like jweak.
4069   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
4070                  result, Address(result), tmp1, tmp2);
4071   bind(resolved);
4072 }
4073 
4074 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
4075   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
4076   ldr(dst, Address(rmethod, Method::const_offset()));
4077   ldr(dst, Address(dst, ConstMethod::constants_offset()));
4078   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
4079   ldr(dst, Address(dst, mirror_offset));
4080   resolve_oop_handle(dst, tmp1, tmp2);
4081 }
4082 
4083 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4084   if (UseCompressedClassPointers) {
4085     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4086     if (CompressedKlassPointers::base() == NULL) {
4087       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4088       return;
4089     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4090                && CompressedKlassPointers::shift() == 0) {
4091       // Only the bottom 32 bits matter
4092       cmpw(trial_klass, tmp);
4093       return;
4094     }
4095     decode_klass_not_null(tmp);
4096   } else {
4097     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4098   }
4099   cmp(trial_klass, tmp);
4100 }
4101 
4102 void MacroAssembler::store_klass(Register dst, Register src) {
4103   // FIXME: Should this be a store release?  Concurrent GCs assume the
4104   // klass length is valid if the klass field is not null.
4105   if (UseCompressedClassPointers) {
4106     encode_klass_not_null(src);
4107     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4108   } else {
4109     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4110   }
4111 }
4112 
4113 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4114   if (UseCompressedClassPointers) {
4115     // Store to klass gap in destination
4116     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4117   }
4118 }
4119 
4120 // Algorithm must match CompressedOops::encode.
4121 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4122 #ifdef ASSERT
4123   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
4124 #endif
4125   verify_oop_msg(s, "broken oop in encode_heap_oop");
4126   if (CompressedOops::base() == NULL) {
4127     if (CompressedOops::shift() != 0) {
4128       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4129       lsr(d, s, LogMinObjAlignmentInBytes);
4130     } else {
4131       mov(d, s);
4132     }
4133   } else {
4134     subs(d, s, rheapbase);
4135     csel(d, d, zr, Assembler::HS);
4136     lsr(d, d, LogMinObjAlignmentInBytes);
4137 
4138     /*  Old algorithm: is this any worse?
4139     Label nonnull;

4234   // Cannot assert, unverified entry point counts instructions (see .ad file)
4235   // vtableStubs also counts instructions in pd_code_size_limit.
4236   // Also do not verify_oop as this is called by verify_oop.
4237   if (CompressedOops::shift() != 0) {
4238     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4239     if (CompressedOops::base() != NULL) {
4240       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4241     } else {
4242       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4243     }
4244   } else {
4245     assert (CompressedOops::base() == NULL, "sanity");
4246     if (dst != src) {
4247       mov(dst, src);
4248     }
4249   }
4250 }
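
For reference, the arithmetic that encode_heap_oop and this decode path implement can be sketched in standalone C++ (illustrative only, not HotSpot code; the base and shift are assumed example values):

// Standalone sketch of the CompressedOops encode/decode arithmetic.
#include <cstdint>
#include <cassert>

static const uint64_t kHeapBase = 0x800000000ULL; // assumed rheapbase value
static const unsigned kShift    = 3;              // assumed LogMinObjAlignmentInBytes

// encode: subtract the base and shift right; a null oop (below the base)
// encodes to 0, matching subs + csel(d, d, zr, HS) + lsr above.
uint32_t encode_oop(uint64_t oop) {
  if (oop < kHeapBase) return 0;
  return (uint32_t)((oop - kHeapBase) >> kShift);
}

// decode (not-null variant): shift left and add the base, matching
// add(dst, rheapbase, src, LSL, shift). Narrow value 0 is not special-cased.
uint64_t decode_oop_not_null(uint32_t narrow) {
  return kHeapBase + ((uint64_t)narrow << kShift);
}

int main() {
  uint64_t p = kHeapBase + 0x1000;
  assert(decode_oop_not_null(encode_oop(p)) == p);
  return 0;
}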
4251 
4252 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
4253 
4254 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
4255   assert(UseCompressedClassPointers, "not using compressed class pointers");
4256   assert(Metaspace::initialized(), "metaspace not initialized yet");
4257 
4258   if (_klass_decode_mode != KlassDecodeNone) {
4259     return _klass_decode_mode;
4260   }
4261 
4262   assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
4263          || 0 == CompressedKlassPointers::shift(), "decode alg wrong");
4264 
4265   if (CompressedKlassPointers::base() == NULL) {
4266     return (_klass_decode_mode = KlassDecodeZero);
4267   }
4268 
4269   if (operand_valid_for_logical_immediate(
4270         /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
4271     const uint64_t range_mask =
4272       (1ULL << log2i(CompressedKlassPointers::range())) - 1;
4273     if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
4274       return (_klass_decode_mode = KlassDecodeXor);
4275     }
4276   }
4277 
4278   const uint64_t shifted_base =
4279     (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4280   guarantee((shifted_base & 0xffff0000ffffffff) == 0,
4281             "compressed class base bad alignment");
4282 
4283   return (_klass_decode_mode = KlassDecodeMovk);
4284 }
4285 
4286 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
4287   switch (klass_decode_mode()) {
4288   case KlassDecodeZero:
4289     if (CompressedKlassPointers::shift() != 0) {
4290       lsr(dst, src, LogKlassAlignmentInBytes);
4291     } else {
4292       if (dst != src) mov(dst, src);
4293     }
4294     break;
4295 
4296   case KlassDecodeXor:
4297     if (CompressedKlassPointers::shift() != 0) {
4298       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4299       lsr(dst, dst, LogKlassAlignmentInBytes);
4300     } else {
4301       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4302     }
4303     break;
4304 
4305   case KlassDecodeMovk:
4306     if (CompressedKlassPointers::shift() != 0) {
4307       ubfx(dst, src, LogKlassAlignmentInBytes, 32);
4308     } else {
4309       movw(dst, src);
4310     }
4311     break;
4312 
4313   case KlassDecodeNone:
4314     ShouldNotReachHere();
4315     break;
4316   }
4317 }
4318 
4319 void MacroAssembler::encode_klass_not_null(Register r) {
4320   encode_klass_not_null(r, r);
4321 }
4322 
4323 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4324   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4325 
4326   switch (klass_decode_mode()) {
4327   case KlassDecodeZero:
4328     if (CompressedKlassPointers::shift() != 0) {
4329       lsl(dst, src, LogKlassAlignmentInBytes);
4330     } else {
4331       if (dst != src) mov(dst, src);
4332     }
4333     break;
4334 
4335   case KlassDecodeXor:
4336     if (CompressedKlassPointers::shift() != 0) {
4337       lsl(dst, src, LogKlassAlignmentInBytes);
4338       eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
4339     } else {
4340       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4341     }
4342     break;
4343 
4344   case KlassDecodeMovk: {
4345     const uint64_t shifted_base =
4346       (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4347 
4348     if (dst != src) movw(dst, src);
4349     movk(dst, shifted_base >> 32, 32);
4350 
4351     if (CompressedKlassPointers::shift() != 0) {
4352       lsl(dst, dst, LogKlassAlignmentInBytes);
4353     }
4354 
4355     break;
4356   }
4357 
4358   case KlassDecodeNone:
4359     ShouldNotReachHere();
4360     break;
4361   }
4362 }
4363 
4364 void  MacroAssembler::decode_klass_not_null(Register r) {
4365   decode_klass_not_null(r, r);
4366 }
4367 
4368 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
4369 #ifdef ASSERT
4370   {
4371     ThreadInVMfromUnknown tiv;
4372     assert (UseCompressedOops, "should only be used for compressed oops");
4373     assert (Universe::heap() != NULL, "java heap should be initialized");
4374     assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");

5916   }
5917 }
5918 
5919 
5920 // A double move
5921 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
5922  if (src.first()->is_stack()) {
5923     if (dst.first()->is_stack()) {
5924       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
5925       str(tmp, Address(sp, reg2offset_out(dst.first())));
5926     } else {
5927       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
5928     }
5929   } else if (src.first() != dst.first()) {
5930     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
5931       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
5932     else
5933       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
5934   }
5935 }

  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "ci/ciEnv.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "gc/shared/cardTableBarrierSet.hpp"
  37 #include "gc/shared/cardTable.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 #include "interpreter/bytecodeHistogram.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "compiler/compileTask.hpp"
  43 #include "compiler/disassembler.hpp"
  44 #include "logging/log.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "nativeInst_aarch64.hpp"
  48 #include "oops/accessDecorators.hpp"
  49 #include "oops/compressedKlass.inline.hpp"
  50 #include "oops/compressedOops.inline.hpp"
  51 #include "oops/klass.inline.hpp"
  52 #include "runtime/continuation.hpp"
  53 #include "runtime/icache.hpp"
  54 #include "runtime/interfaceSupport.inline.hpp"
  55 #include "runtime/javaThread.hpp"
  56 #include "runtime/jniHandles.inline.hpp"
  57 #include "runtime/sharedRuntime.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "utilities/powerOfTwo.hpp"
  60 #ifdef COMPILER1
  61 #include "c1/c1_LIRAssembler.hpp"
  62 #endif
  63 #ifdef COMPILER2
  64 #include "oops/oop.hpp"
  65 #include "opto/compile.hpp"
  66 #include "opto/node.hpp"
  67 #include "opto/output.hpp"
  68 #endif
  69 

4026   adrp(rscratch1, src2, offset);
4027   ldr(rscratch1, Address(rscratch1, offset));
4028   cmp(src1, rscratch1);
4029 }
4030 
4031 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4032   cmp(obj1, obj2);
4033 }
4034 
4035 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4036   load_method_holder(rresult, rmethod);
4037   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4038 }
4039 
4040 void MacroAssembler::load_method_holder(Register holder, Register method) {
4041   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
4042   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
4043   ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
4044 }
4045 
4046 // Loads the object's narrow Klass* (nKlass) into dst.
4047 // src and dst must be distinct registers.
4048 // Preserves all registers (including src, rscratch1 and rscratch2), but clobbers condition flags.
4049 void MacroAssembler::load_nklass(Register dst, Register src) {
4050   assert(UseCompressedClassPointers, "expects UseCompressedClassPointers");
4051 
4052   assert_different_registers(src, dst);
4053 
4054   Label slow, done;
4055 
4056   // Check if we can take the (common) fast path: obj is unlocked.
4057   ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
4058   eor(dst, dst, markWord::unlocked_value);
4059   tst(dst, markWord::lock_mask_in_place);
4060   br(Assembler::NE, slow);
4061 
4062   // Fast-path: shift and decode Klass*.
4063   lsr(dst, dst, markWord::klass_shift);
4064   b(done);
4065 
4066   bind(slow);
4067   RegSet saved_regs = RegSet::of(lr);
4068   // We need r0 as the argument and return register for the call. Preserve it if necessary.
4069   if (dst != r0) {
4070     saved_regs += RegSet::of(r0);
4071   }
4072   push(saved_regs, sp);
4073   mov(r0, src);
4074   assert(StubRoutines::load_nklass() != NULL, "Must have stub");
4075   far_call(RuntimeAddress(StubRoutines::load_nklass()));
4076   if (dst != r0) {
4077     mov(dst, r0);
4078   }
4079   pop(saved_regs, sp);
4080   bind(done);
4081 }
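
The fast path relies on the Lilliput mark-word layout: when the object is unlocked, the narrow Klass* sits in the upper bits of the mark word, so one load plus a shift suffices; any other lock state displaces the header, hence the stub call. A standalone sketch (the bit positions are assumptions for illustration):

// Illustrative model of the load_nklass fast path, not HotSpot code.
#include <cstdint>

static const uint64_t kUnlockedValue = 0b01; // markWord::unlocked_value
static const uint64_t kLockMask      = 0b11; // markWord::lock_mask_in_place
static const unsigned kKlassShift    = 32;   // assumed markWord::klass_shift

// Returns true and the narrow klass when the mark word is unlocked;
// false means the header is displaced and a slow path must recover it.
bool fast_nklass(uint64_t mark, uint32_t* nklass) {
  uint64_t m = mark ^ kUnlockedValue;          // eor(dst, dst, unlocked_value)
  if ((m & kLockMask) != 0) return false;      // tst + br(NE, slow)
  *nklass = (uint32_t)(m >> kKlassShift);      // lsr(dst, dst, klass_shift)
  return true;
}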
4082 
4083 void MacroAssembler::load_klass(Register dst, Register src) {
4084   load_nklass(dst, src);
4085   decode_klass_not_null(dst);
4086 }
4087 
4088 // ((OopHandle)result).resolve();
4089 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
4090   // OopHandle::resolve is an indirection.
4091   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
4092 }
4093 
4094 // ((WeakHandle)result).resolve();
4095 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
4096   assert_different_registers(result, tmp1, tmp2);
4097   Label resolved;
4098 
4099   // A null weak handle resolves to null.
4100   cbz(result, resolved);
4101 
4102   // Only 64-bit platforms support GCs that require a tmp register.
4103   // WeakHandle::resolve is an indirection like jweak.
4104   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
4105                  result, Address(result), tmp1, tmp2);
4106   bind(resolved);
4107 }
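
Both resolvers are a single indirection: the handle register holds a pointer to an oop slot, and resolving loads through it via the GC access API. Roughly, with plain pointers (illustrative stand-in types, not HotSpot's):

// Shape of OopHandle/WeakHandle resolution, with stand-in types.
struct oopDesc;
typedef oopDesc* oop;

oop resolve_oop_handle_sketch(oop* handle) {
  return *handle;                         // access_load_at(T_OBJECT, IN_NATIVE, ...)
}

oop resolve_weak_handle_sketch(oop* handle) {
  if (handle == nullptr) return nullptr;  // cbz(result, resolved)
  return *handle;                         // IN_NATIVE | ON_PHANTOM_OOP_REF load
}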
4108 
4109 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
4110   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
4111   ldr(dst, Address(rmethod, Method::const_offset()));
4112   ldr(dst, Address(dst, ConstMethod::constants_offset()));
4113   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
4114   ldr(dst, Address(dst, mirror_offset));
4115   resolve_oop_handle(dst, tmp1, tmp2);
4116 }
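
load_mirror chases the same Method* -> ConstMethod* -> ConstantPool* -> InstanceKlass* chain as load_method_holder, then loads the holder's java_mirror OopHandle and resolves it. Flattened into plain pointers (hypothetical field names):

// Hypothetical flattened view of the load_mirror pointer chase.
struct oopDesc; typedef oopDesc* oop;
struct InstanceKlass;
struct ConstantPool  { InstanceKlass* pool_holder; };
struct ConstMethod   { ConstantPool* constants; };
struct Method        { ConstMethod* const_method; };
struct InstanceKlass { oop* java_mirror_handle; };  // an OopHandle slot

oop load_mirror_sketch(Method* m) {
  InstanceKlass* holder = m->const_method->constants->pool_holder;
  return *holder->java_mirror_handle;               // resolve_oop_handle
}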
4117 
4118 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4119   assert(UseCompressedClassPointers, "Lilliput");
4120   load_nklass(tmp, oop);
4121   if (CompressedKlassPointers::base() == NULL) {
4122     cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4123     return;
4124   } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4125              && CompressedKlassPointers::shift() == 0) {
4126     // Only the bottom 32 bits matter
4127     cmpw(trial_klass, tmp);
4128     return;
4129   }
4130   decode_klass_not_null(tmp);
4131   cmp(trial_klass, tmp);
4132 }
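
The two early returns avoid decoding the narrow klass: with a null encoding base, decode is just a shift, so the comparison can fold it into the cmp; with a base whose low 32 bits are zero and no shift, only the low 32 bits can differ, given trial_klass lies in the encoding range. The reasoning as standalone C++ (illustrative, not HotSpot code):

// Why the cmp_klass shortcuts are sound (illustrative).
#include <cstdint>

bool klass_equal(uint64_t trial, uint32_t narrow,
                 uint64_t base, unsigned shift) {
  if (base == 0) {
    // decode(narrow) == narrow << shift: fold the shift into the compare,
    // as in cmp(trial_klass, tmp, LSL, shift).
    return trial == ((uint64_t)narrow << shift);
  }
  if ((base & 0xffffffffULL) == 0 && shift == 0) {
    // decode(narrow) == base + narrow and base sets only high bits; if
    // trial is a valid Klass* in the range, its high bits already equal
    // base, so cmpw on the low 32 bits decides equality.
    return (uint32_t)trial == narrow;
  }
  // General case: fully decode, then compare.
  return trial == (base + ((uint64_t)narrow << shift));
}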
4133 
4134 // Algorithm must match CompressedOops::encode.
4135 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4136 #ifdef ASSERT
4137   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
4138 #endif
4139   verify_oop_msg(s, "broken oop in encode_heap_oop");
4140   if (CompressedOops::base() == NULL) {
4141     if (CompressedOops::shift() != 0) {
4142       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4143       lsr(d, s, LogMinObjAlignmentInBytes);
4144     } else {
4145       mov(d, s);
4146     }
4147   } else {
4148     subs(d, s, rheapbase);
4149     csel(d, d, zr, Assembler::HS);
4150     lsr(d, d, LogMinObjAlignmentInBytes);
4151 
4152     /*  Old algorithm: is this any worse?
4153     Label nonnull;

4248   // Cannot assert, unverified entry point counts instructions (see .ad file)
4249   // vtableStubs also counts instructions in pd_code_size_limit.
4250   // Also do not verify_oop as this is called by verify_oop.
4251   if (CompressedOops::shift() != 0) {
4252     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4253     if (CompressedOops::base() != NULL) {
4254       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4255     } else {
4256       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4257     }
4258   } else {
4259     assert (CompressedOops::base() == NULL, "sanity");
4260     if (dst != src) {
4261       mov(dst, src);
4262     }
4263   }
4264 }
4265 
4266 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
4267 
4268 // Returns a static string
4269 const char* MacroAssembler::describe_klass_decode_mode(MacroAssembler::KlassDecodeMode mode) {
4270   switch (mode) {
4271   case KlassDecodeNone: return "none";
4272   case KlassDecodeZero: return "zero";
4273   case KlassDecodeXor:  return "xor";
4274   case KlassDecodeMovk: return "movk";
4275   default:
4276     ShouldNotReachHere();
4277   }
4278   return NULL;
4279 }
4280 
4281 // Return the current narrow Klass pointer decode mode.
4282 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
4283   if (_klass_decode_mode == KlassDecodeNone) {
4284     // First time initialization
4285     assert(UseCompressedClassPointers, "not using compressed class pointers");
4286     assert(Metaspace::initialized(), "metaspace not initialized yet");
4287 
4288     _klass_decode_mode = klass_decode_mode_for_base(CompressedKlassPointers::base());
4289     guarantee(_klass_decode_mode != KlassDecodeNone,
4290               PTR_FORMAT " is not a valid encoding base on aarch64",
4291               p2i(CompressedKlassPointers::base()));
4292     log_info(metaspace)("klass decode mode initialized: %s", describe_klass_decode_mode(_klass_decode_mode));
4293   }
4294   return _klass_decode_mode;
4295 }
4296 
4297 // Given an arbitrary base address, return the KlassDecodeMode that would be used. Returns KlassDecodeNone
4298 // if the base address is not valid for encoding.
4299 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode_for_base(address base) {
4300   assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
4301 
4302   const uint64_t base_u64 = (uint64_t) base;
4303 
4304   if (base_u64 == 0) {
4305     return KlassDecodeZero;
4306   }
4307 
4308   if (operand_valid_for_logical_immediate(false, base_u64) &&
4309       ((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0)) {
4310     return KlassDecodeXor;
4311   }
4312 
4313   const uint64_t shifted_base = base_u64 >> CompressedKlassPointers::shift();
4314   if ((shifted_base & 0xffff0000ffffffff) == 0) {
4315     return KlassDecodeMovk;
4316   }
4317 
4318   return KlassDecodeNone;
4319 }
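
Each mode names the cheapest AArch64 sequence that can rebuild the Klass* from a narrow value for a given base. As plain arithmetic (standalone sketch; the validity conditions mirror the checks above):

// Illustrative decode arithmetic for the three klass decode modes.
#include <cstdint>

uint64_t decode_zero(uint32_t n, unsigned shift) {
  return (uint64_t)n << shift;                      // lsl only
}

uint64_t decode_xor(uint32_t n, uint64_t base, unsigned shift) {
  // Sound when base is range-aligned, so its bits never overlap the
  // shifted narrow value and eor acts like add: lsl + eor.
  return ((uint64_t)n << shift) ^ base;
}

uint64_t decode_movk(uint32_t n, uint64_t base, unsigned shift) {
  // Sound when (base >> shift) has bits only in [32, 48): a single
  // movk(dst, shifted_base >> 32, 32) merges the base in, then lsl.
  uint64_t shifted_base = base >> shift;
  return ((uint64_t)n | shifted_base) << shift;
}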
4320 
4321 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
4322   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4323   assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
4324   switch (klass_decode_mode()) {
4325   case KlassDecodeZero:
4326     lsr(dst, src, LogKlassAlignmentInBytes);
4327     break;
4328 
4329   case KlassDecodeXor:
4330     eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4331     lsr(dst, dst, LogKlassAlignmentInBytes);
4332     break;
4333 
4334   case KlassDecodeMovk:
4335     ubfx(dst, src, LogKlassAlignmentInBytes, MaxNarrowKlassPointerBits);
4336     break;
4337 
4338   case KlassDecodeNone:
4339     ShouldNotReachHere();
4340     break;
4341   }
4342 }
4343 
4344 void MacroAssembler::encode_klass_not_null(Register r) {
4345   encode_klass_not_null(r, r);
4346 }
4347 
4348 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4349   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4350 
4351   assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
4352 
4353   switch (klass_decode_mode()) {
4354   case KlassDecodeZero:
4355     if (dst != src) mov(dst, src);
4356     break;
4357 
4358   case KlassDecodeXor:
4359     lsl(dst, src, LogKlassAlignmentInBytes);
4360     eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
4361     break;
4362 
4363   case KlassDecodeMovk: {
4364     const uint64_t shifted_base =
4365       (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4366 
4367     // Invalid base should have been gracefully handled via klass_decode_mode() in VM initialization.
4368     assert((shifted_base & 0xffff0000ffffffff) == 0, "incompatible base");
4369 
4370     if (dst != src) movw(dst, src);
4371     movk(dst, shifted_base >> 32, 32);
4372     lsl(dst, dst, LogKlassAlignmentInBytes);
4373     break;
4374   }
4375 
4376   case KlassDecodeNone:
4377     ShouldNotReachHere();
4378     break;
4379   }
4380 }
4381 
4382 void  MacroAssembler::decode_klass_not_null(Register r) {
4383   decode_klass_not_null(r, r);
4384 }
4385 
4386 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
4387 #ifdef ASSERT
4388   {
4389     ThreadInVMfromUnknown tiv;
4390     assert (UseCompressedOops, "should only be used for compressed oops");
4391     assert (Universe::heap() != NULL, "java heap should be initialized");
4392     assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");

5934   }
5935 }
5936 
5937 
5938 // A double move
5939 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
5940  if (src.first()->is_stack()) {
5941     if (dst.first()->is_stack()) {
5942       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
5943       str(tmp, Address(sp, reg2offset_out(dst.first())));
5944     } else {
5945       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
5946     }
5947   } else if (src.first() != dst.first()) {
5948     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
5949       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
5950     else
5951       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
5952   }
5953 }
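
double_move covers four placements, reading incoming stack slots off rfp and writing outgoing ones off sp, as the addressing above shows. A simplified case analysis (illustrative; the real code tests VMRegPair slots and FloatRegisters):

// Simplified case analysis of double_move.
struct Slot { bool on_stack; int reg; };

void double_move_cases(Slot src, Slot dst) {
  if (src.on_stack) {
    if (dst.on_stack) { /* ldr tmp, [rfp, off_in]; str tmp, [sp, off_out] */ }
    else              { /* ldrd dst_freg, [rfp, off_in] */ }
  } else if (!dst.on_stack) {
    if (src.reg != dst.reg) { /* fmovd dst_freg, src_freg */ }
  } else {
    /* strd src_freg, [sp, off_out] */
  }
}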
5954 
5955 // Attempt to fast-lock an object. Falls through on success; branches to the
5956 // slow label on failure.
5957 // Registers:
5958 //  - obj: the object to be locked
5959 //  - hdr: the header, already loaded from obj, will be destroyed
5960 //  - t1, t2, t3: temporary registers, will be destroyed
5961 void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register t2, Register t3, Label& slow) {
5962   // Check that there is space on the lock stack for the object.
5963   ldr(t1, Address(rthread, Thread::lock_stack_current_offset()));
5964   ldr(t2, Address(rthread, Thread::lock_stack_limit_offset()));
5965   cmp(t1, t2);
5966   br(Assembler::GE, slow);
5967 
5968   // Expected old header: the mark word with the unlocked bit set (mark | 1)
5969   orr(hdr, hdr, markWord::unlocked_value);
5970   // New header: lock bits cleared to indicate 'locked', into t2
5971   eor(t2, hdr, markWord::unlocked_value);
5972   // Try to swing the header from unlocked to locked
5973   cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
5974           /*acquire*/ true, /*release*/ true, /*weak*/ false, t3);
5975   br(Assembler::NE, slow);
5976 
5977   // After successful lock, push object on lock-stack
5978   str(obj, Address(t1, 0));
5979   add(t1, t1, oopSize);
5980   str(t1, Address(rthread, Thread::lock_stack_current_offset()));
5981 }
5982 
5983 void MacroAssembler::fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
5984   // Load the expected old header (lock-bits cleared to indicate 'locked') into hdr
5985   andr(hdr, hdr, ~markWord::lock_mask_in_place);
5986 
5987   // Load the new header (unlocked) into t1
5988   orr(t1, hdr, markWord::unlocked_value);
5989 
5990   // Try to swing header from locked to unlocked
5991   cmpxchg(obj, hdr, t1, Assembler::xword,
5992           /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
5993   br(Assembler::NE, slow);
5994 
5995   // After successful unlock, pop object from lock-stack
5996   ldr(t1, Address(rthread, Thread::lock_stack_current_offset()));
5997   sub(t1, t1, oopSize);
5998   str(t1, Address(rthread, Thread::lock_stack_current_offset()));
5999 }
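
Together, fast_lock and fast_unlock implement the lock-stack fast path: a capacity check, a CAS on the mark word between its 'unlocked' and 'locked' encodings, and a push or pop of the oop on the per-thread lock stack. A standalone sketch with std::atomic (bit values as in the code above; the stack layout is simplified):

// Illustrative lock-stack fast path, not HotSpot code.
#include <atomic>
#include <cstdint>

typedef uintptr_t oop_t;                       // stand-in for an oop
static const uint64_t kUnlocked = 0b01;        // markWord::unlocked_value
static const uint64_t kLockMask = 0b11;        // markWord::lock_mask_in_place

struct LockStack { oop_t* current; oop_t* limit; };

// Returns false where the assembly branches to 'slow'.
bool fast_lock_sketch(LockStack* ls, std::atomic<uint64_t>* mark, oop_t obj) {
  if (ls->current >= ls->limit) return false;  // no lock-stack space
  uint64_t unlocked = mark->load() | kUnlocked;   // expected: unlocked header
  uint64_t locked   = unlocked ^ kUnlocked;       // lock bits cleared = locked
  if (!mark->compare_exchange_strong(unlocked, locked)) return false;
  *ls->current++ = obj;                        // push obj on the lock stack
  return true;
}

bool fast_unlock_sketch(LockStack* ls, std::atomic<uint64_t>* mark) {
  uint64_t locked   = mark->load() & ~kLockMask;  // expected: locked header
  uint64_t unlocked = locked | kUnlocked;         // new: unlocked header
  if (!mark->compare_exchange_strong(locked, unlocked)) return false;
  --ls->current;                               // pop the lock stack
  return true;
}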