src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
}
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
const Register thread = r15_thread;
+ lightweight_lock(disp_hdr, obj, hdr, thread, tmp, slow_case);
#else
- const Register thread = disp_hdr;
- get_thread(thread);
+ // Implicit null check.
+ movptr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
+ // Lacking registers and thread on x86_32. Always take slow path.
+ jmp(slow_case);
#endif
- lightweight_lock(obj, hdr, thread, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// Load object header
movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case);
#else
- // This relies on the implementation of lightweight_unlock being able to handle
- // that the reg_rax and thread Register parameters may alias each other.
- get_thread(disp_hdr);
- lightweight_unlock(obj, disp_hdr, disp_hdr, hdr, slow_case);
+ // Lacking registers and thread on x86_32. Always take slow path.
+ jmp(slow_case);
#endif
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
// the displaced header, get the object header instead
}
}
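Note for readers less familiar with LM_LIGHTWEIGHT: the fast path CASes the mark word's lock bits and records the locked oop on a per-thread lock stack. On x86_32 there is no dedicated thread register (no r15_thread) and too few spare registers, so with this change C1 no longer emits a fast path there at all; lock_object and unlock_object jump straight to the runtime slow path. A minimal, self-contained sketch of the lock-stack idea, with illustrative names and a simplified bit encoding (no recursion or monitor inflation handled):

#include <atomic>
#include <cassert>
#include <cstdint>
#include <vector>

struct Obj { std::atomic<uintptr_t> mark{0b01}; };  // 0b01 = unlocked, 0b00 = fast-locked

struct LockStack {                                   // per-thread, like the JavaThread lock stack
  std::vector<Obj*> slots;

  // Fast path: flip the lock bits from "unlocked" to "locked" with a CAS and
  // remember the object on this thread's lock stack; any failure means slow path.
  bool try_lock(Obj* o) {
    uintptr_t expected = o->mark.load() | 0b01;      // expect the unlocked pattern
    if (!o->mark.compare_exchange_strong(expected, expected & ~uintptr_t(0b11))) {
      return false;                                  // contended or inflated -> slow path
    }
    slots.push_back(o);
    return true;
  }

  void unlock(Obj* o) {
    assert(!slots.empty() && slots.back() == o);
    slots.pop_back();
    o->mark.fetch_or(0b01);                          // restore the unlocked bit pattern
  }
};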
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
- assert_different_registers(obj, klass, len);
- movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
+ assert_different_registers(obj, klass, len, t1, t2);
#ifdef _LP64
- if (UseCompressedClassPointers) { // Take care not to kill klass
+ if (UseCompactObjectHeaders) {
+ movptr(t1, Address(klass, Klass::prototype_header_offset()));
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
+ } else if (UseCompressedClassPointers) { // Take care not to kill klass
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
movptr(t1, klass);
encode_klass_not_null(t1, rscratch1);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
} else
#endif
{
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
}
if (len->is_valid()) {
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
movl(Address(obj, base_offset), t1);
}
#endif
}
#ifdef _LP64
- else if (UseCompressedClassPointers) {
+ else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
xorptr(t1, t1);
store_klass_gap(obj, t1);
}
#endif
}
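With UseCompactObjectHeaders the mark word is the whole header: the class information is encoded in the mark word itself, which is why the new branch copies the per-class prototype header from Klass::prototype_header_offset() instead of storing the static prototype mark plus a separate klass field. A standalone sketch of that allocation-time copy, with a made-up bit layout (make_prototype below is hypothetical, not HotSpot code):

#include <cstdint>
#include <cstdio>

struct Klass {
  uint64_t prototype_header;             // precomputed initial mark word for instances
};

struct Object {
  uint64_t mark;                         // the single-word header under compact headers
};

// Hypothetical encoding: class id in the upper bits, "unlocked" lock bits at the
// bottom. The real bit layout in HotSpot differs; only the copy pattern matters here.
uint64_t make_prototype(uint32_t class_id) {
  return (uint64_t(class_id) << 42) | 0b01;
}

void initialize_header(Object* obj, const Klass* k) {
  obj->mark = k->prototype_header;       // one store, no separate klass field to fill
}

int main() {
  Klass k{make_prototype(7)};
  Object o{};
  initialize_header(&o, &k);
  std::printf("mark = 0x%016llx\n", static_cast<unsigned long long>(o.mark));
  return 0;
}

The point of the per-class prototype is that all class-dependent header bits are computed once when the Klass is set up, so object allocation stays a single header store.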
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
"con_size_in_bytes is not multiple of alignment");
const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
-
+ if (UseCompactObjectHeaders) {
+ assert(hdr_size_in_bytes == 8, "check object headers size");
+ }
initialize_header(obj, klass, noreg, t1, t2);
if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
// clear rest of allocated space
const Register t1_zero = t1;
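The assert above records the assumption this zeroing code relies on: with compact headers the header is exactly one 8-byte word, so clearing the rest of the instance starts right after the mark word. A tiny sketch of that intent (kCompactHeaderBytes and clear_body are illustrative names, not HotSpot's):

#include <cstdint>
#include <cstring>

constexpr int kCompactHeaderBytes = 8;   // mark word only, matching the assert above

// Mirrors the intent of the zeroing code: skip the header word, then clear the
// remainder of the newly allocated instance (size assumed >= the header size).
void clear_body(void* obj, int instance_size_in_bytes) {
  std::memset(static_cast<char*>(obj) + kCompactHeaderBytes, 0,
              instance_size_in_bytes - kCompactHeaderBytes);
}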