src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
movptr(rscratch2, (intptr_t)Universe::non_oop_word());
return trampoline_call(Address(entry, rh));
}
int MacroAssembler::ic_check_size() {
if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
! return NativeInstruction::instruction_size * 7;
} else {
! return NativeInstruction::instruction_size * 5;
}
}
int MacroAssembler::ic_check(int end_alignment) {
Register receiver = j_rarg0;
movptr(rscratch2, (intptr_t)Universe::non_oop_word());
return trampoline_call(Address(entry, rh));
}
int MacroAssembler::ic_check_size() {
+ int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
! return NativeInstruction::instruction_size * (7 + extra_instructions);
} else {
! return NativeInstruction::instruction_size * (5 + extra_instructions);
}
}
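// ---- Editorial sketch: the sizing arithmetic above, modeled standalone.
// The +1 matches the patch: load_nklass_compact() emits two instructions
// (ldr + lsr) where the non-compact path needs a single ldrw. The 7-vs-5
// branch-shape counts are taken from the code; instruction_size is 4 bytes
// on AArch64.
#include <cstdio>

static int ic_check_size_model(bool far_branch, bool compact_headers) {
  const int insn  = 4;                          // NativeInstruction::instruction_size
  const int extra = compact_headers ? 1 : 0;    // ldr + lsr instead of one ldrw
  return insn * ((far_branch ? 7 : 5) + extra);
}

int main() {
  printf("%d\n", ic_check_size_model(false, true));   // prints 24
}
// ----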
int MacroAssembler::ic_check(int end_alignment) {
Register receiver = j_rarg0;
// That's why we align before the inline cache check here, and not after.
align(end_alignment, offset() + ic_check_size());
int uep_offset = offset();
! if (UseCompressedClassPointers) {
ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmpw(tmp1, tmp2);
} else {
ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
// That's why we align before the inline cache check here, and not after.
align(end_alignment, offset() + ic_check_size());
int uep_offset = offset();
! if (UseCompactObjectHeaders) {
+ load_nklass_compact(tmp1, receiver);
+ ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ cmpw(tmp1, tmp2);
+ } else if (UseCompressedClassPointers) {
ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmpw(tmp1, tmp2);
} else {
ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
void MacroAssembler::load_klass(Register dst, Register src) {
! if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else {
ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
+ // Loads the obj's narrow Klass (nklass) into dst.
+ // Preserves all registers (incl src, rscratch1 and rscratch2).
+ // Input:
+ // src - the oop we want to load the klass from.
+ // dst - output nklass.
+ void MacroAssembler::load_nklass_compact(Register dst, Register src) {
+ assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
+ ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
+ lsr(dst, dst, markWord::klass_shift);
+ }
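// ---- Editorial sketch: a standalone model of the two instructions above.
// It assumes the compact mark word keeps the narrow klass in its uppermost
// bits at markWord::klass_shift; the value 42 used here is the Lilliput
// layout and should be treated as an assumption, not part of the patch.
#include <cstdint>

constexpr int kKlassShift = 42;   // assumed value of markWord::klass_shift

inline uint32_t narrow_klass_from_mark(uint64_t mark) {
  return static_cast<uint32_t>(mark >> kKlassShift);   // mirrors ldr, then lsr
}
// ----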
+
void MacroAssembler::load_klass(Register dst, Register src) {
! if (UseCompactObjectHeaders) {
+ load_nklass_compact(dst, src);
+ decode_klass_not_null(dst);
+ } else if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else {
ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
ldr(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst, tmp1, tmp2);
}
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
if (UseCompressedClassPointers) {
! ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
if (CompressedKlassPointers::base() == nullptr) {
cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
ldr(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst, tmp1, tmp2);
}
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
+ assert_different_registers(oop, trial_klass, tmp);
if (UseCompressedClassPointers) {
! if (UseCompactObjectHeaders) {
+ load_nklass_compact(tmp, oop);
+ } else {
+ ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
+ }
if (CompressedKlassPointers::base() == nullptr) {
cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
}
cmp(trial_klass, tmp);
}
+ void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
+ if (UseCompactObjectHeaders) {
+ load_nklass_compact(tmp1, src);
+ load_nklass_compact(tmp2, dst);
+ cmpw(tmp1, tmp2);
+ } else if (UseCompressedClassPointers) {
+ ldrw(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+ ldrw(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
+ cmpw(tmp1, tmp2);
+ } else {
+ ldr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+ ldr(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
+ cmp(tmp1, tmp2);
+ }
+ }
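// ---- Editorial note: the two-oop overload above compares the narrow
// (encoded) klass values directly. That is sufficient because the encoding
// is injective over the valid klass range: two oops share a Klass* exactly
// when their nklass bits are equal, so neither side needs a decode. ----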
+
void MacroAssembler::store_klass(Register dst, Register src) {
// FIXME: Should this be a store release? Concurrent GCs assume
// klass length is valid if the klass field is not null.
+ assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
encode_klass_not_null(src);
strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
} else {
str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
}
void MacroAssembler::store_klass_gap(Register dst, Register src) {
+ assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
// Store to klass gap in destination
strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
}
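// ---- Editorial note: with compact headers the klass bits live in the mark
// word, so there is no separate klass field to store and no klass gap to
// fill; the added asserts ensure these two helpers are never reached in
// that configuration. ----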
if (_klass_decode_mode != KlassDecodeNone) {
return _klass_decode_mode;
}
- assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
- || 0 == CompressedKlassPointers::shift(), "decode alg wrong");
-
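// ---- Editorial note: the removed assert tied the encoding shift to the
// fixed LogKlassAlignmentInBytes constant. With compact headers the shift
// need not equal that constant anymore, which is also why the encode and
// decode paths below now query CompressedKlassPointers::shift() at
// runtime. ----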
if (CompressedKlassPointers::base() == nullptr) {
return (_klass_decode_mode = KlassDecodeZero);
}
if (operand_valid_for_logical_immediate(
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
switch (klass_decode_mode()) {
case KlassDecodeZero:
if (CompressedKlassPointers::shift() != 0) {
! lsr(dst, src, LogKlassAlignmentInBytes);
} else {
if (dst != src) mov(dst, src);
}
break;
case KlassDecodeXor:
if (CompressedKlassPointers::shift() != 0) {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
! lsr(dst, dst, LogKlassAlignmentInBytes);
} else {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
}
break;
case KlassDecodeMovk:
if (CompressedKlassPointers::shift() != 0) {
! ubfx(dst, src, LogKlassAlignmentInBytes, 32);
} else {
movw(dst, src);
}
break;
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
switch (klass_decode_mode()) {
case KlassDecodeZero:
if (CompressedKlassPointers::shift() != 0) {
! lsr(dst, src, CompressedKlassPointers::shift());
} else {
if (dst != src) mov(dst, src);
}
break;
case KlassDecodeXor:
if (CompressedKlassPointers::shift() != 0) {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
! lsr(dst, dst, CompressedKlassPointers::shift());
} else {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
}
break;
case KlassDecodeMovk:
if (CompressedKlassPointers::shift() != 0) {
! ubfx(dst, src, CompressedKlassPointers::shift(), 32);
} else {
movw(dst, src);
}
break;
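// ---- Editorial note: encode_klass_not_null is the inverse of
// decode_klass_not_null below; a standalone round-trip sketch of the
// KlassDecodeXor pair follows the decode path. ----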
assert (UseCompressedClassPointers, "should only be used for compressed headers");
switch (klass_decode_mode()) {
case KlassDecodeZero:
if (CompressedKlassPointers::shift() != 0) {
! lsl(dst, src, LogKlassAlignmentInBytes);
} else {
if (dst != src) mov(dst, src);
}
break;
case KlassDecodeXor:
if (CompressedKlassPointers::shift() != 0) {
! lsl(dst, src, LogKlassAlignmentInBytes);
eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
} else {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
}
break;
assert (UseCompressedClassPointers, "should only be used for compressed headers");
switch (klass_decode_mode()) {
case KlassDecodeZero:
if (CompressedKlassPointers::shift() != 0) {
! lsl(dst, src, CompressedKlassPointers::shift());
} else {
if (dst != src) mov(dst, src);
}
break;
case KlassDecodeXor:
if (CompressedKlassPointers::shift() != 0) {
! lsl(dst, src, CompressedKlassPointers::shift());
eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
} else {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
}
break;
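// ---- Editorial sketch: a standalone round trip through the KlassDecodeXor
// encode/decode pair above. It assumes, as that mode requires, that the
// base shares no bits with the shifted narrow-klass range, so eor both adds
// and removes the base. The base and shift values are illustrative only.
#include <cstdint>
#include <cassert>

constexpr uint64_t kBase  = uint64_t(1) << 44;   // hypothetical CompressedKlassPointers::base()
constexpr int      kShift = 10;                  // hypothetical CompressedKlassPointers::shift()

inline uint32_t encode_xor(uint64_t klass) {
  return static_cast<uint32_t>((klass ^ kBase) >> kShift);   // eor, then lsr
}

inline uint64_t decode_xor(uint32_t nklass) {
  return (uint64_t(nklass) << kShift) ^ kBase;               // lsl, then eor
}

int main() {
  uint64_t k = kBase | (uint64_t(0x1234) << kShift);
  assert(decode_xor(encode_xor(k)) == k);   // round trip recovers the Klass*
  return 0;
}
// ----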
if (dst != src) movw(dst, src);
movk(dst, shifted_base >> 32, 32);
if (CompressedKlassPointers::shift() != 0) {
! lsl(dst, dst, LogKlassAlignmentInBytes);
}
break;
}
if (dst != src) movw(dst, src);
movk(dst, shifted_base >> 32, 32);
if (CompressedKlassPointers::shift() != 0) {
! lsl(dst, dst, CompressedKlassPointers::shift());
}
break;
}
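// ---- Editorial note: the KlassDecodeMovk path above appears to rely on the
// shifted base having bits only in the 32..47 range: movw/movk materialize
// the narrow klass in the low word and patch the base's upper bits in, and
// the final lsl re-applies the encoding shift to both at once. ----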
// Implements lightweight-locking.
//
// - obj: the object to be locked
// - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
! void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
! assert_different_registers(obj, t1, t2, t3, rscratch1);
Label push;
const Register top = t1;
const Register mark = t2;
const Register t = t3;
// Preload the markWord. It is important that this is the first
// instruction emitted as it is part of C1's null check semantics.
ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
// Check if the lock-stack is full.
ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
cmpw(top, (unsigned)LockStack::end_offset());
br(Assembler::GE, slow);
// Implements lightweight-locking.
//
// - obj: the object to be locked
// - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
! void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
! assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
Label push;
const Register top = t1;
const Register mark = t2;
const Register t = t3;
// Preload the markWord. It is important that this is the first
// instruction emitted as it is part of C1's null check semantics.
ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ if (UseObjectMonitorTable) {
+ // Clear cache in case fast locking succeeds.
+ str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
+ }
+
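// ---- Editorial note: with UseObjectMonitorTable the BasicLock carries a
// cached ObjectMonitor* (see object_monitor_cache_offset_in_bytes above);
// zeroing it before the fast-lock attempt makes sure a successful fast lock
// does not leave a stale monitor reference for the unlock path to find. ----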
// Check if the lock-stack is full.
ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
cmpw(top, (unsigned)LockStack::end_offset());
br(Assembler::GE, slow);
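// ---- Editorial sketch: a standalone model of the fullness check above,
// assuming a fixed-capacity per-thread stack of oop entries. Capacity and
// field names are illustrative, not HotSpot's; HotSpot tracks the top as a
// byte offset and compares it against LockStack::end_offset().
#include <cstdint>

struct LockStackModel {
  static const int kCapacity = 8;          // hypothetical capacity
  uintptr_t entries[kCapacity];
  int top = 0;                             // mirrors the per-thread lock-stack top

  bool try_push(uintptr_t obj) {
    if (top >= kCapacity) return false;    // "br GE, slow": full, take the slow path
    entries[top++] = obj;
    return true;
  }
};
// ----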