src/hotspot/cpu/s390/macroAssembler_s390.cpp
z_bru(done);
}
bind(object_has_monitor);
! Register zero = temp;
! Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
! // The object's monitor m is unlocked iff m->owner is null,
! // otherwise m->owner may contain a thread or a stack address.
!
! // Try to CAS m->owner from null to current thread.
! // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
! // Otherwise, register zero is filled with the current owner.
! z_lghi(zero, 0);
! z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
! if (LockingMode != LM_LIGHTWEIGHT) {
! // Store a non-null value into the box.
! z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
! }
!
! z_bre(done); // acquired the lock for the first time.
!
! BLOCK_COMMENT("fast_path_recursive_lock {");
! // Check if we are already the owner (recursive lock)
! z_cgr(Z_thread, zero); // owner is stored in zero by "z_csg" above
! z_brne(done); // not a recursive lock
!
! // Current thread already owns the lock. Just increment recursion count.
! z_agsi(Address(monitor_tagged, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 1ll);
! z_cgr(zero, zero); // set the CC to EQUAL
! BLOCK_COMMENT("} fast_path_recursive_lock");
bind(done);
BLOCK_COMMENT("} compiler_fast_lock_object");
// If locking was successful, CR should indicate 'EQ'.
// The compiler or the native wrapper generates a branch to the runtime call
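For readers less familiar with the CSG idiom, the following is a minimal C++ sketch of the locking protocol this hunk emits. The Monitor layout and field names here are illustrative assumptions, not HotSpot's actual ObjectMonitor:

#include <atomic>
#include <cstdint>

// Illustrative monitor; field names are assumptions, not HotSpot's layout.
struct Monitor {
  std::atomic<void*> owner{nullptr};   // null => unlocked
  int64_t            recursions{0};    // nesting depth, written only by owner
};

// Returns true on success, mirroring CC == EQ after the emitted sequence.
bool fast_lock(Monitor* m, void* self) {
  void* expected = nullptr;
  // z_csg: CAS owner from null to self. On failure, 'expected' receives
  // the current owner, just as register 'zero' does above.
  if (m->owner.compare_exchange_strong(expected, self)) {
    return true;                       // acquired the lock for the first time
  }
  if (expected == self) {              // z_cgr(Z_thread, zero): recursive?
    m->recursions++;                   // z_agsi(..., 1): bump recursion count
    return true;                       // CC forced to EQ
  }
  return false;                        // CC == NE: take the slow path
}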
z_bru(done);
}
bind(object_has_monitor);
! if (!UseObjectMonitorTable) {
! Register zero = temp;
! Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
! // The object's monitor m is unlocked iff m->owner is null,
! // otherwise m->owner may contain a thread or a stack address.
!
! // Try to CAS m->owner from null to current thread.
! // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
! // Otherwise, register zero is filled with the current owner.
! z_lghi(zero, 0);
! z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
! if (LockingMode != LM_LIGHTWEIGHT) {
! // Store a non-null value into the box.
! z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
! }
!
! z_bre(done); // acquired the lock for the first time.
!
! BLOCK_COMMENT("fast_path_recursive_lock {");
! // Check if we are already the owner (recursive lock)
! z_cgr(Z_thread, zero); // owner is stored in zero by "z_csg" above
! z_brne(done); // not a recursive lock
!
! // Current thread already owns the lock. Just increment recursion count.
! z_agsi(Address(monitor_tagged, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 1ll);
! z_cgr(zero, zero); // set the CC to EQUAL
+ BLOCK_COMMENT("} fast_path_recursive_lock");
+ } else {
+ // OMCache lookup not supported yet. Take the slow path.
+ // Set flag to NE
+ z_ltgr(oop, oop);
+ z_bru(done);
+ }
bind(done);
BLOCK_COMMENT("} compiler_fast_lock_object");
// If locking was successful, CR should indicate 'EQ'.
// The compiler or the native wrapper generates a branch to the runtime call
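Note that monitor_tagged still carries the markWord monitor tag in its low bits; the OM_OFFSET_NO_MONITOR_VALUE_TAG macro folds the tag removal into the field displacement, so no separate untagging instruction is needed. A sketch of that trick, assuming HotSpot's markWord::monitor_value of 2:

#include <cstdint>

// Assumed tag value, mirroring markWord::monitor_value == 2 in HotSpot.
constexpr intptr_t monitor_value = 2;

// Offset adjusted so it can be applied to a *tagged* monitor pointer:
// tagged_ptr + (field_offset - tag) == untagged_ptr + field_offset.
constexpr intptr_t om_offset_no_tag(intptr_t field_offset) {
  return field_offset - monitor_value;
}

// E.g. the recursion counter is addressed off the tagged pointer directly:
int64_t* recursions_addr(uintptr_t tagged_monitor, intptr_t recursions_offset) {
  return reinterpret_cast<int64_t*>(tagged_monitor +
                                    om_offset_no_tag(recursions_offset));
}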
// This is handled like owner thread mismatches: We take the slow path.
// Handle existing monitor.
bind(object_has_monitor);
! z_cg(Z_thread, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
! z_brne(done);
! BLOCK_COMMENT("fast_path_recursive_unlock {");
! load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
! z_bre(not_recursive); // If zero, jump: this is not a recursive lock.
! // Recursive inflated unlock
! z_agsi(Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), -1ll);
! z_cgr(currentHeader, currentHeader); // set the CC to EQUAL
! BLOCK_COMMENT("} fast_path_recursive_unlock");
! z_bru(done);
! bind(not_recursive);
! load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
! z_brne(done);
! load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
! z_brne(done);
! z_release();
! z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
bind(done);
BLOCK_COMMENT("} compiler_fast_unlock_object");
// flag == EQ indicates success
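As with locking, here is a hedged portable sketch of the unlock protocol this hunk emits; EntryList and cxq stand in for the monitor's waiter queues, and the layout is again illustrative:

#include <atomic>
#include <cstdint>

// Illustrative monitor; names are assumptions, not HotSpot's exact layout.
struct Monitor {
  std::atomic<void*> owner{nullptr};
  int64_t            recursions{0};
  std::atomic<void*> EntryList{nullptr};  // blocked threads (illustrative)
  std::atomic<void*> cxq{nullptr};        // recently arrived waiters
};

// Returns true on success, mirroring CC == EQ after the emitted sequence.
bool fast_unlock(Monitor* m, void* self) {
  if (m->owner.load(std::memory_order_relaxed) != self) {
    return false;                    // z_cg/z_brne: not the owner, slow path
  }
  if (m->recursions != 0) {          // recursive inflated unlock
    m->recursions--;                 // z_agsi(..., -1)
    return true;
  }
  // Only release the monitor if no thread is queued on it; otherwise a
  // successor must be woken, which is the runtime's job.
  if (m->EntryList.load(std::memory_order_relaxed) != nullptr) return false;
  if (m->cxq.load(std::memory_order_relaxed) != nullptr)       return false;
  // z_release() + z_stg: release-store null to publish all prior writes.
  m->owner.store(nullptr, std::memory_order_release);
  return true;
}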
// This is handled like owner thread mismatches: We take the slow path.
// Handle existing monitor.
bind(object_has_monitor);
! if (!UseObjectMonitorTable) {
! z_cg(Z_thread, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ z_brne(done);
! BLOCK_COMMENT("fast_path_recursive_unlock {");
! load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
! z_bre(not_recursive); // If zero, jump: this is not a recursive lock.
! // Recursive inflated unlock
! z_agsi(Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), -1ll);
! z_cgr(currentHeader, currentHeader); // set the CC to EQUAL
! BLOCK_COMMENT("} fast_path_recursive_unlock");
! z_bru(done);
! bind(not_recursive);
! load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
! z_brne(done);
! load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
! z_brne(done);
! z_release();
! z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
+ } else {
+ // OMCache lookup not supported yet. Take the slow path.
+ // Set flag to NE
+ z_ltgr(oop, oop);
+ z_bru(done);
+ }
bind(done);
BLOCK_COMMENT("} compiler_fast_unlock_object");
// flag == EQ indicates success
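Both new arms report failure by load-and-testing the oop: since the object reference is never null at this point, z_ltgr(oop, oop) is a one-instruction way to force the condition code away from EQ. A purely illustrative equivalent:

#include <cassert>
#include <cstdint>

// z_ltgr(r, r) sets the condition code from the value in r: zero gives
// CC 0 (EQ), nonzero gives CC 1 or 2, both of which test as NE.
// Returning false corresponds to "fast path failed, go to the runtime".
bool fast_path_result_after_ltgr(uintptr_t oop) {
  assert(oop != 0 && "the locked object is never null at this point");
  return oop == 0;   // always false: the caller branches to the slow path
}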
BLOCK_COMMENT("cKlass encoder {");
#ifdef ASSERT
Label ok;
! z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
z_brc(Assembler::bcondAllZero, ok);
// The plain disassembler does not recognize illtrap. It instead displays
// a 32-bit value. Issuing two illtraps assures the disassembler finds
// the proper beginning of the next instruction.
z_illtrap(0xee);
BLOCK_COMMENT("cKlass encoder {");
#ifdef ASSERT
Label ok;
! z_tmll(current, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment.
z_brc(Assembler::bcondAllZero, ok);
// The plain disassembler does not recognize illtrap. It instead displays
// a 32-bit value. Issuing two illtraps assures the disassembler finds
// the proper beginning of the next instruction.
z_illtrap(0xee);
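The assert block checks klass-pointer alignment with TMLL, which tests the low 16 bits of the register under an immediate mask; bcondAllZero then means the pointer is aligned. An equivalent predicate (the alignment is passed in because klass_alignment_in_bytes() is runtime-dependent):

#include <cstdint>

// z_tmll(reg, align - 1): test the low bits of 'reg' under the mask.
// Valid only when 'alignment_in_bytes' is a power of two, so that
// (alignment_in_bytes - 1) is exactly the set of low bits to test.
bool is_klass_aligned(uintptr_t klass, uintptr_t alignment_in_bytes) {
  return (klass & (alignment_in_bytes - 1)) == 0;
}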
// Scale down the incoming klass pointer first.
// This guarantees that the offset we calculate fits into 32 bits;
// more generally, all subsequent calculations are purely 32-bit.
if (shift != 0) {
- assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
z_srlg(dst, current, shift);
current = dst;
}
if (base != nullptr) {
}
}
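The shift-before-base ordering is what keeps the rest of the encoder in 32-bit arithmetic. A hedged sketch of that arithmetic, assuming (as the alignment assert just verified) that klass and base are both aligned to 1 << shift; the base handling is an assumption, since the hunk is cut off after the base check:

#include <cstdint>

// Shift first (z_srlg), then subtract the pre-shifted base. Because both
// pointers are aligned to (1 << shift), this equals (klass - base) >> shift,
// and every intermediate value fits in 32 bits by design of the encoding.
uint32_t encode_klass(uintptr_t klass, uintptr_t base, int shift) {
  uintptr_t scaled = klass >> shift;        // z_srlg(dst, current, shift)
  if (base != 0) {
    scaled -= (base >> shift);              // assumed base handling
  }
  return static_cast<uint32_t>(scaled);
}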
#ifdef ASSERT
Label ok;
! z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
z_brc(Assembler::bcondAllZero, ok);
// The plain disassembler does not recognize illtrap. It instead displays
// a 32-bit value. Issuing two illtraps assures the disassembler finds
// the proper beginning of the next instruction.
z_illtrap(0xd1);
}
}
#ifdef ASSERT
Label ok;
! z_tmll(dst, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment.
z_brc(Assembler::bcondAllZero, ok);
// The plain disassembler does not recognize illtrap. It instead displays
// a 32-bit value. Issuing two illtraps assures the disassembler finds
// the proper beginning of the next instruction.
z_illtrap(0xd1);
}
}
#ifdef ASSERT
Label ok;
! z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
z_brc(Assembler::bcondAllZero, ok);
// The plain disassembler does not recognize illtrap. It instead displays
// a 32-bit value. Issuing two illtraps assures the disassembler finds
// the proper beginning of the next instruction.
z_illtrap(0xd2);
}
}
#ifdef ASSERT
Label ok;
! z_tmll(dst, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment.
z_brc(Assembler::bcondAllZero, ok);
// The plain disassembler does not recognize illtrap. It instead displays
// a 32-bit value. Issuing two illtraps assures the disassembler finds
// the proper beginning of the next instruction.
z_illtrap(0xd2);
if (UseCompressedClassPointers) {
const int shift = CompressedKlassPointers::shift();
address base = CompressedKlassPointers::base();
! assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
assert_different_registers(Rop1, Z_R0);
assert_different_registers(Rop1, Rbase, Z_R1);
// First encode register oop and then compare with cOop in memory.
// This sequence saves an unnecessary cOop load and decode.
if (UseCompressedClassPointers) {
const int shift = CompressedKlassPointers::shift();
address base = CompressedKlassPointers::base();
! if (CompressedKlassPointers::tiny_classpointer_mode()) {
+ assert(shift >= 3, "cKlass encoder detected bad shift");
+ } else {
+ assert((shift == 0) || (shift == 3), "cKlass encoder detected bad shift");
+ }
assert_different_registers(Rop1, Z_R0);
assert_different_registers(Rop1, Rbase, Z_R1);
// First encode register oop and then compare with cOop in memory.
// This sequence saves an unnecessary cOop load and decode.
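The reworked assert reflects that with tiny class pointers (compact object headers) the shift is no longer pinned to LogKlassAlignmentInBytes: any shift of at least 3 is acceptable, while legacy mode keeps the old 0-or-3 rule. A sketch of the two-mode check (function name hypothetical):

#include <cassert>

// Mirrors the new assert: tiny-classpointer mode allows any shift >= 3;
// legacy mode allows only 0 (unscaled) or 3 (the old fixed log-alignment).
void check_cklass_shift(bool tiny_classpointer_mode, int shift) {
  if (tiny_classpointer_mode) {
    assert(shift >= 3 && "cKlass encoder detected bad shift");
  } else {
    assert((shift == 0 || shift == 3) && "cKlass encoder detected bad shift");
  }
}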