src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
+ #include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
+ #include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
}
- void MacroAssembler::load_klass(Register dst, Register src) {
- if (UseCompressedClassPointers) {
- ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- decode_klass_not_null(dst);
- } else {
- ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ // Loads the obj's narrow Klass (nKlass) into dst.
+ // src and dst must be distinct registers.
+ // Preserves all registers (incl. src, rscratch1 and rscratch2), but clobbers the condition flags.
+ void MacroAssembler::load_nklass(Register dst, Register src) {
+ assert(UseCompressedClassPointers, "expects UseCompressedClassPointers");
+
+ assert_different_registers(src, dst);
+
+ Label slow, done;
+
+ // Check whether we can take the (common) fast path: requires the object to be unlocked.
+ ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
+ eor(dst, dst, markWord::unlocked_value);
+ tst(dst, markWord::lock_mask_in_place);
+ br(Assembler::NE, slow);
+
+ // Fast path: shift the mark word to extract the narrow Klass. The XOR above only
+ // flipped the low lock bits, so the Klass bits are unchanged.
+ lsr(dst, dst, markWord::klass_shift);
+ b(done);
+
+ bind(slow);
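+ // Slow path: the object is locked, so the mark word holds a pointer to the lock
+ // rather than the header bits. Call the runtime stub to look up the displaced
+ // mark word and recover the narrow Klass from it.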
+ RegSet saved_regs = RegSet::of(lr);
+ // We need r0 as the argument and return register for the call. Preserve it, if necessary.
+ if (dst != r0) {
+ saved_regs += RegSet::of(r0);
+ }
+ push(saved_regs, sp);
+ mov(r0, src);
+ assert(StubRoutines::load_nklass() != NULL, "Must have stub");
+ far_call(RuntimeAddress(StubRoutines::load_nklass()));
+ if (dst != r0) {
+ mov(dst, r0);
}
+ pop(saved_regs, sp);
+ bind(done);
+ }
+
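+ // Loads the obj's Klass* into dst: loads the narrow Klass via load_nklass(), then decodes it.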
+ void MacroAssembler::load_klass(Register dst, Register src) {
+ load_nklass(dst, src);
+ decode_klass_not_null(dst);
}
// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
// OopHandle::resolve is an indirection.
ldr(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst, tmp);
}
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
- if (UseCompressedClassPointers) {
- ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
- if (CompressedKlassPointers::base() == NULL) {
- cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
- return;
- } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
- && CompressedKlassPointers::shift() == 0) {
- // Only the bottom 32 bits matter
- cmpw(trial_klass, tmp);
- return;
- }
- decode_klass_not_null(tmp);
- } else {
- ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
+ assert(UseCompressedClassPointers, "Lilliput");
+ load_nklass(tmp, oop);
+ if (CompressedKlassPointers::base() == NULL) {
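+ // With a zero base, decoding is a pure left shift, which the CMP applies to the
+ // narrow value directly.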
+ cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
+ return;
+ } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
+ && CompressedKlassPointers::shift() == 0) {
+ // Only the bottom 32 bits matter
+ cmpw(trial_klass, tmp);
+ return;
}
+ decode_klass_not_null(tmp);
cmp(trial_klass, tmp);
}
- void MacroAssembler::store_klass(Register dst, Register src) {
- // FIXME: Should this be a store release? concurrent gcs assumes
- // klass length is valid if klass field is not null.
- if (UseCompressedClassPointers) {
- encode_klass_not_null(src);
- strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
- } else {
- str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
- }
- }
-
- void MacroAssembler::store_klass_gap(Register dst, Register src) {
- if (UseCompressedClassPointers) {
- // Store to klass gap in destination
- strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
- }
- }
-
// Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
}
}
MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
+ // Returns a static string describing the given klass decode mode.
+ const char* MacroAssembler::describe_klass_decode_mode(MacroAssembler::KlassDecodeMode mode) {
+ switch (mode) {
+ case KlassDecodeNone: return "none";
+ case KlassDecodeZero: return "zero";
+ case KlassDecodeXor: return "xor";
+ case KlassDecodeMovk: return "movk";
+ default:
+ ShouldNotReachHere();
+ }
+ return NULL;
+ }
+
+ // Return the current narrow Klass pointer decode mode.
MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
- assert(UseCompressedClassPointers, "not using compressed class pointers");
- assert(Metaspace::initialized(), "metaspace not initialized yet");
+ if (_klass_decode_mode == KlassDecodeNone) {
+ // First-time initialization
+ assert(UseCompressedClassPointers, "not using compressed class pointers");
+ assert(Metaspace::initialized(), "metaspace not initialized yet");
- if (_klass_decode_mode != KlassDecodeNone) {
- return _klass_decode_mode;
+ _klass_decode_mode = klass_decode_mode_for_base(CompressedKlassPointers::base());
+ guarantee(_klass_decode_mode != KlassDecodeNone,
+ PTR_FORMAT " is not a valid encoding base on aarch64",
+ p2i(CompressedKlassPointers::base()));
+ log_info(metaspace)("klass decode mode initialized: %s", describe_klass_decode_mode(_klass_decode_mode));
}
+ return _klass_decode_mode;
+ }
- assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
- || 0 == CompressedKlassPointers::shift(), "decode alg wrong");
+ // Given an arbitrary base address, returns the KlassDecodeMode that would be used. Returns
+ // KlassDecodeNone if the base address is not valid for encoding.
+ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode_for_base(address base) {
+ assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
- if (CompressedKlassPointers::base() == NULL) {
- return (_klass_decode_mode = KlassDecodeZero);
+ const uint64_t base_u64 = (uint64_t) base;
+
+ if (base_u64 == 0) {
+ return KlassDecodeZero;
}
- if (operand_valid_for_logical_immediate(
- /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
- const uint64_t range_mask =
- (1ULL << log2i(CompressedKlassPointers::range())) - 1;
- if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
- return (_klass_decode_mode = KlassDecodeXor);
- }
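+ // XOR mode requires the base to be a valid logical immediate and to be aligned to
+ // KlassEncodingMetaspaceMax, so that the base bits and the Klass offset bits are
+ // disjoint and the EOR is equivalent to an addition.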
+ if (operand_valid_for_logical_immediate(/*is32*/false, base_u64) &&
+ ((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0)) {
+ return KlassDecodeXor;
}
- const uint64_t shifted_base =
- (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
- guarantee((shifted_base & 0xffff0000ffffffff) == 0,
- "compressed class base bad alignment");
+ const uint64_t shifted_base = base_u64 >> CompressedKlassPointers::shift();
+ if ((shifted_base & 0xffff0000ffffffff) == 0) {
+ return KlassDecodeMovk;
+ }
- return (_klass_decode_mode = KlassDecodeMovk);
+ return KlassDecodeNone;
}
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
+ assert (UseCompressedClassPointers, "should only be used for compressed headers");
+ assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
switch (klass_decode_mode()) {
case KlassDecodeZero:
- if (CompressedKlassPointers::shift() != 0) {
- lsr(dst, src, LogKlassAlignmentInBytes);
- } else {
- if (dst != src) mov(dst, src);
- }
+ lsr(dst, src, LogKlassAlignmentInBytes);
break;
case KlassDecodeXor:
- if (CompressedKlassPointers::shift() != 0) {
- eor(dst, src, (uint64_t)CompressedKlassPointers::base());
- lsr(dst, dst, LogKlassAlignmentInBytes);
- } else {
- eor(dst, src, (uint64_t)CompressedKlassPointers::base());
- }
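+ // Base bits and Klass offset bits are disjoint (see klass_decode_mode_for_base),
+ // so the EOR strips the base; the shift then compresses the remaining offset.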
+ eor(dst, src, (uint64_t)CompressedKlassPointers::base());
+ lsr(dst, dst, LogKlassAlignmentInBytes);
break;
case KlassDecodeMovk:
- if (CompressedKlassPointers::shift() != 0) {
- ubfx(dst, src, LogKlassAlignmentInBytes, 32);
- } else {
- movw(dst, src);
- }
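+ // UBFX performs the shift and the field extraction in one instruction; the base
+ // bits sit above the extracted field and fall away.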
+ ubfx(dst, src, LogKlassAlignmentInBytes, MaxNarrowKlassPointerBits);
break;
case KlassDecodeNone:
ShouldNotReachHere();
break;
}
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
+ assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
+
switch (klass_decode_mode()) {
case KlassDecodeZero:
- if (CompressedKlassPointers::shift() != 0) {
- lsl(dst, src, LogKlassAlignmentInBytes);
- } else {
- if (dst != src) mov(dst, src);
- }
+ if (dst != src) mov(dst, src);
break;
case KlassDecodeXor:
- if (CompressedKlassPointers::shift() != 0) {
- lsl(dst, src, LogKlassAlignmentInBytes);
- eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
- } else {
- eor(dst, src, (uint64_t)CompressedKlassPointers::base());
- }
+ lsl(dst, src, LogKlassAlignmentInBytes);
+ eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
break;
case KlassDecodeMovk: {
const uint64_t shifted_base =
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
+ // An invalid base should have been caught gracefully by klass_decode_mode() during VM initialization.
+ assert((shifted_base & 0xffff0000ffffffff) == 0, "incompatible base");
+
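+ // movw zero-extends the narrow Klass into dst, movk merges the shifted base into
+ // bits 32..47, and the final shift yields base + (nKlass << shift).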
if (dst != src) movw(dst, src);
movk(dst, shifted_base >> 32, 32);
-
- if (CompressedKlassPointers::shift() != 0) {
- lsl(dst, dst, LogKlassAlignmentInBytes);
- }
-
+ lsl(dst, dst, LogKlassAlignmentInBytes);
break;
}
case KlassDecodeNone:
ShouldNotReachHere();