src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
/*
! * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
/*
! * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
- #include <sys/types.h>
-
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "compiler/compileTask.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+ #include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif
+ #include <sys/types.h>
+
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
void MacroAssembler::load_klass(Register dst, Register src) {
! if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else {
ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
+ // Loads the obj's Klass* into dst.
+ // Preserves all registers (incl src, rscratch1 and rscratch2).
+ void MacroAssembler::load_nklass_compact(Register dst, Register src) {
+ assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
+
+ Label fast;
+
+ // Check if we can take the (common) fast path, if obj is unlocked.
+ ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
+ tbz(dst, exact_log2(markWord::monitor_value), fast);
+
+ // Fetch displaced header
+ ldr(dst, Address(dst, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
+
+ // Fast-path: shift to get narrowKlass.
+ bind(fast);
+ lsr(dst, dst, markWord::klass_shift);
+ }
+
void MacroAssembler::load_klass(Register dst, Register src) {
! if (UseCompactObjectHeaders) {
+ load_nklass_compact(dst, src);
+ decode_klass_not_null(dst);
+ } else if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else {
ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
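The decode emitted by load_nklass_compact and load_klass above relies on the compact-header layout in which the narrowKlass occupies the upper bits of the 64-bit mark word, above markWord::klass_shift. A minimal sketch of that decode, assuming only that layout (plain C++ for illustration, not HotSpot code; the klass_shift parameter stands in for markWord::klass_shift):

#include <cstdint>

// Mirrors the lsr(dst, dst, markWord::klass_shift) above: the logical right
// shift discards the low mark-word bits (lock bits, age, hash) and leaves the
// narrowKlass, which decode_klass_not_null() then turns into a Klass*.
static inline uint32_t narrow_klass_from_mark(uint64_t mark, unsigned klass_shift) {
  return static_cast<uint32_t>(mark >> klass_shift);
}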
ldr(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst, tmp1, tmp2);
}
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
if (UseCompressedClassPointers) {
! ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
if (CompressedKlassPointers::base() == nullptr) {
cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
ldr(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst, tmp1, tmp2);
}
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
+ assert_different_registers(oop, trial_klass, tmp);
if (UseCompressedClassPointers) {
! if (UseCompactObjectHeaders) {
+ load_nklass_compact(tmp, oop);
+ } else {
+ ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
+ }
if (CompressedKlassPointers::base() == nullptr) {
cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
}
cmp(trial_klass, tmp);
}
+ void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
+ if (UseCompactObjectHeaders) {
+ load_nklass_compact(tmp1, src);
+ load_nklass_compact(tmp2, dst);
+ cmpw(tmp1, tmp2);
+ } else if (UseCompressedClassPointers) {
+ ldrw(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+ ldrw(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
+ cmpw(tmp1, tmp2);
+ } else {
+ ldr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+ ldr(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
+ cmp(tmp1, tmp2);
+ }
+ }
+
void MacroAssembler::store_klass(Register dst, Register src) {
// FIXME: Should this be a store release? Concurrent GCs assume
// klass length is valid if the klass field is not null.
+ assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
encode_klass_not_null(src);
strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
} else {
str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
}
void MacroAssembler::store_klass_gap(Register dst, Register src) {
+ assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
// Store to klass gap in destination
strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
}
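Both asserts above reflect that, with compact object headers, the klass is not written through a separate klass field at all; it is assumed to be composed into the object's initial mark word when the object is allocated, so store_klass()/store_klass_gap() must never be reached. A hedged sketch of that composition (illustration only, not HotSpot code; prototype_bits stands in for the unlocked mark-word prototype):

#include <cstdint>

// Hypothetical helper: build an initial mark word that already carries the
// narrowKlass in its upper bits, so no separate klass store is needed under
// UseCompactObjectHeaders.
static inline uint64_t compose_initial_mark(uint32_t narrow_klass,
                                            unsigned klass_shift,
                                            uint64_t prototype_bits) {
  return (static_cast<uint64_t>(narrow_klass) << klass_shift) | prototype_bits;
}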
strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
}
}
// Implements lightweight-locking.
- // Branches to slow upon failure to lock the object, with ZF cleared.
- // Falls through upon success with ZF set.
//
// - obj: the object to be locked
! // - hdr: the header, already loaded from obj, will be destroyed
! // - t1, t2: temporary registers, will be destroyed
! void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
! assert_different_registers(obj, hdr, t1, t2, rscratch1);
!
! // Check if we would have space on lock-stack for the object.
! ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
! cmpw(t1, (unsigned)LockStack::end_offset() - 1);
! br(Assembler::GT, slow);
!
! // Load (object->mark() | 1) into hdr
! orr(hdr, hdr, markWord::unlocked_value);
! // Clear lock-bits, into t2
! eor(t2, hdr, markWord::unlocked_value);
! // Try to swing header from unlocked to locked
! // Clobbers rscratch1 when UseLSE is false
! cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
! /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
br(Assembler::NE, slow);
! // After successful lock, push object on lock-stack
! ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
! str(obj, Address(rthread, t1));
! addw(t1, t1, oopSize);
! strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}
// Implements lightweight-unlocking.
- // Branches to slow upon failure, with ZF cleared.
- // Falls through upon success, with ZF set.
//
// - obj: the object to be unlocked
! // - hdr: the (pre-loaded) header of the object
! // - t1, t2: temporary registers
! void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
! assert_different_registers(obj, hdr, t1, t2, rscratch1);
#ifdef ASSERT
{
- // The following checks rely on the fact that LockStack is only ever modified by
- // its owning thread, even if the lock got inflated concurrently; removal of LockStack
- // entries after inflation will happen delayed in that case.
-
// Check for lock-stack underflow.
Label stack_ok;
ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
cmpw(t1, (unsigned)LockStack::start_offset());
! br(Assembler::GT, stack_ok);
STOP("Lock-stack underflow");
bind(stack_ok);
}
- {
- // Check if the top of the lock-stack matches the unlocked object.
- Label tos_ok;
- subw(t1, t1, oopSize);
- ldr(t1, Address(rthread, t1));
- cmpoop(t1, obj);
- br(Assembler::EQ, tos_ok);
- STOP("Top of lock-stack does not match the unlocked object");
- bind(tos_ok);
- }
- {
- // Check that hdr is fast-locked.
- Label hdr_ok;
- tst(hdr, markWord::lock_mask_in_place);
- br(Assembler::EQ, hdr_ok);
- STOP("Header is not fast-locked");
- bind(hdr_ok);
- }
#endif
! // Load the new header (unlocked) into t1
! orr(t1, hdr, markWord::unlocked_value);
! // Try to swing header from locked to unlocked
! // Clobbers rscratch1 when UseLSE is false
! cmpxchg(obj, hdr, t1, Assembler::xword,
! /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
br(Assembler::NE, slow);
! // After successful unlock, pop object from lock-stack
! ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
! subw(t1, t1, oopSize);
#ifdef ASSERT
! str(zr, Address(rthread, t1));
#endif
! strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}
strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
}
}
// Implements lightweight-locking.
//
// - obj: the object to be locked
! // - t1, t2, t3: temporary registers, will be destroyed
! // - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding).
! void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
! assert_different_registers(obj, t1, t2, t3, rscratch1);
!
! Label push;
! const Register top = t1;
! const Register mark = t2;
! const Register t = t3;
!
! // Preload the markWord. It is important that this is the first
! // instruction emitted as it is part of C1's null check semantics.
! ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
!
! // Check if the lock-stack is full.
! ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
! cmpw(top, (unsigned)LockStack::end_offset());
! br(Assembler::GE, slow);
+
+ // Check for recursion.
+ subw(t, top, oopSize);
+ ldr(t, Address(rthread, t));
+ cmp(obj, t);
+ br(Assembler::EQ, push);
+
+ // Check header for monitor (0b10).
+ tst(mark, markWord::monitor_value);
br(Assembler::NE, slow);
! // Try to lock. Transition lock bits 0b01 => 0b00
! assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
! orr(mark, mark, markWord::unlocked_value);
! eor(t, mark, markWord::unlocked_value);
! cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
+ /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
+ br(Assembler::NE, slow);
+
+ bind(push);
+ // After successful lock, push object on lock-stack.
+ str(obj, Address(rthread, top));
+ addw(top, top, oopSize);
+ strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}
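As a cross-check on the control flow emitted above, here is a self-contained C++ model of the fast-lock sequence (an illustration under stated assumptions, not HotSpot code: ToyThread, its fixed-size lock_stack, and the raw 0b01/0b10 bit values merely mirror LockStack and markWord):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Toy per-thread lock-stack, standing in for JavaThread's LockStack.
struct ToyThread {
  static constexpr size_t kCap = 8;
  const void* lock_stack[kCap];
  size_t top = 0;
};

// Returns true on fast-path success; false means "take the slow path".
inline bool toy_fast_lock(std::atomic<uint64_t>& mark, const void* obj, ToyThread& t) {
  if (t.top == ToyThread::kCap) return false;                 // lock-stack full
  if (t.top > 0 && t.lock_stack[t.top - 1] == obj) {          // recursive: just push again
    t.lock_stack[t.top++] = obj;
    return true;
  }
  uint64_t m = mark.load(std::memory_order_relaxed);
  if (m & 0b10) return false;                                 // monitor bit set (inflated)
  uint64_t expected = m | 0b01;                               // expect unlocked (0b01)
  uint64_t desired  = expected & ~uint64_t(0b01);             // new value: fast-locked (0b00)
  if (!mark.compare_exchange_strong(expected, desired, std::memory_order_acquire))
    return false;                                             // lost the race
  t.lock_stack[t.top++] = obj;                                // record on lock-stack
  return true;
}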
// Implements lightweight-unlocking.
//
// - obj: the object to be unlocked
! // - t1, t2, t3: temporary registers
! // - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding).
! void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
! // cmpxchg clobbers rscratch1.
+ assert_different_registers(obj, t1, t2, t3, rscratch1);
#ifdef ASSERT
{
// Check for lock-stack underflow.
Label stack_ok;
ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
cmpw(t1, (unsigned)LockStack::start_offset());
! br(Assembler::GE, stack_ok);
STOP("Lock-stack underflow");
bind(stack_ok);
}
#endif
! Label unlocked, push_and_slow;
! const Register top = t1;
+ const Register mark = t2;
+ const Register t = t3;
! // Check if obj is top of lock-stack.
! ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
! subw(top, top, oopSize);
! ldr(t, Address(rthread, top));
+ cmp(obj, t);
br(Assembler::NE, slow);
! // Pop lock-stack.
! DEBUG_ONLY(str(zr, Address(rthread, top));)
! strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
+
+ // Check if recursive.
+ subw(t, top, oopSize);
+ ldr(t, Address(rthread, t));
+ cmp(obj, t);
+ br(Assembler::EQ, unlocked);
+
+ // Not recursive. Check header for monitor (0b10).
+ ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
+
#ifdef ASSERT
! // Check header not unlocked (0b01).
+ Label not_unlocked;
+ tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
+ stop("lightweight_unlock already unlocked");
+ bind(not_unlocked);
#endif
!
+ // Try to unlock. Transition lock bits 0b00 => 0b01
+ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
+ orr(t, mark, markWord::unlocked_value);
+ cmpxchg(obj, mark, t, Assembler::xword,
+ /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
+ br(Assembler::EQ, unlocked);
+
+ bind(push_and_slow);
+ // Restore lock-stack and handle the unlock in runtime.
+ DEBUG_ONLY(str(obj, Address(rthread, top));)
+ addw(top, top, oopSize);
+ strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ b(slow);
+
+ bind(unlocked);
}
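And the matching model for the fast-unlock path above (same toy types and assumptions as the lock sketch; illustration only, not HotSpot code):

// Returns true on fast-path success; false means "take the slow path"
// (with the lock-stack restored first, as push_and_slow does above).
inline bool toy_fast_unlock(std::atomic<uint64_t>& mark, const void* obj, ToyThread& t) {
  if (t.top == 0 || t.lock_stack[t.top - 1] != obj) return false;  // obj not top-of-stack
  --t.top;                                                         // pop lock-stack
  if (t.top > 0 && t.lock_stack[t.top - 1] == obj) return true;    // recursive unlock
  uint64_t m = mark.load(std::memory_order_relaxed);
  if (m & 0b10) { ++t.top; return false; }                         // inflated: restore, go slow
  uint64_t expected = m & ~uint64_t(0b01);                         // expect fast-locked (0b00)
  uint64_t desired  = expected | 0b01;                             // new value: unlocked (0b01)
  if (!mark.compare_exchange_strong(expected, desired, std::memory_order_release)) {
    ++t.top;                                                       // restore and go slow
    return false;
  }
  return true;
}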