src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
}
__ allocate_array(op->obj()->as_register(),
len,
tmp1,
tmp2,
- arrayOopDesc::header_size(op->type()),
+ arrayOopDesc::base_offset_in_bytes(op->type()),
array_element_size(op->type()),
op->klass()->as_register(),
*op->stub()->entry());
}
__ bind(*op->stub()->continuation());
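// Note: allocate_array is now given the element base offset in bytes rather than the
// header size in heap words; with compact object headers the first element presumably
// no longer starts on a word boundary, so a byte offset is the only exact way to pass it.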
Register src_pos = op->src_pos()->as_register();
Register dst_pos = op->dst_pos()->as_register();
Register length = op->length()->as_register();
Register tmp = op->tmp()->as_register();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+ Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (is_reference_type(basic_type)) basic_type = T_OBJECT;
if (flags & LIR_OpArrayCopy::type_check) {
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
- if (UseCompressedClassPointers) {
- __ movl(tmp, src_klass_addr);
- __ cmpl(tmp, dst_klass_addr);
- } else {
- __ movptr(tmp, src_klass_addr);
- __ cmpptr(tmp, dst_klass_addr);
- }
+ __ cmp_klass(src, dst, tmp, tmp2);
__ jcc(Assembler::notEqual, *stub->entry());
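// cmp_klass(src, dst, tmp, tmp2) compares the klasses of the two array objects in one
// helper that covers both compressed class pointers and compact object headers; the
// extra tmp2 (rscratch2, declared above) is only needed in the compact-headers case.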
} else {
// For object arrays, if src is a sub class of dst then we can
// safely do the copy.
Label cont, slow;
#ifdef _LP64
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp, rscratch1);
}
#endif
-
if (basic_type != T_OBJECT) {
-
- if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
- else __ cmpptr(tmp, dst_klass_addr);
+ __ cmp_klass(tmp, dst, tmp2);
__ jcc(Assembler::notEqual, halt);
- if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
- else __ cmpptr(tmp, src_klass_addr);
+ __ cmp_klass(tmp, src, tmp2);
__ jcc(Assembler::equal, known_ok);
} else {
- if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
- else __ cmpptr(tmp, dst_klass_addr);
+ __ cmp_klass(tmp, dst, tmp2);
__ jcc(Assembler::equal, known_ok);
__ cmpptr(src, dst);
__ jcc(Assembler::equal, known_ok);
}
__ bind(halt);
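// Here the expected Klass* is already loaded in tmp, so the three-register form
// cmp_klass(tmp, obj, tmp2) compares it against the actual klass of dst/src instead of
// the open-coded cmpl/cmpptr against the klass field.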
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register(); // may not be an oop
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
- if (!UseFastLocking) {
+ if (LockingMode == LM_MONITOR) {
__ jmp(*op->stub()->entry());
} else if (op->code() == lir_lock) {
Register scratch = noreg;
- if (UseBiasedLocking) {
+ if (UseBiasedLocking || LockingMode == LM_LIGHTWEIGHT) {
scratch = op->scratch_opr()->as_register();
}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
if (info != NULL) {
add_debug_info_for_null_check_here(info);
}
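// LockingMode replaces the old UseFastLocking switch: LM_MONITOR always takes the
// runtime stub, and LM_LIGHTWEIGHT, like biased locking, needs a scratch register
// for lock_object().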
#ifdef _LP64
- if (UseCompressedClassPointers) {
+ if (UseCompactObjectHeaders) {
+ Register tmp = rscratch1;
+ assert_different_registers(tmp, obj);
+ assert_different_registers(tmp, result);
+
+ // Check if we can take the (common) fast path, if obj is unlocked.
+ __ movq(result, Address(obj, oopDesc::mark_offset_in_bytes()));
+ __ testb(result, markWord::monitor_value);
+ __ jcc(Assembler::notZero, *op->stub()->entry());
+ __ bind(*op->stub()->continuation());
+ // Fast-path: shift and decode Klass*.
+ __ shrq(result, markWord::klass_shift);
+ __ decode_klass_not_null(result, tmp);
+ } else if (UseCompressedClassPointers) {
__ movl(result, Address(obj, oopDesc::klass_offset_in_bytes()));
__ decode_klass_not_null(result, rscratch1);
} else
#endif
__ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
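// With compact object headers the narrow Klass* sits in the upper bits of the mark
// word: if the object has a monitor installed the mark holds the monitor pointer
// instead, so that case goes through the stub; otherwise the mark is shifted down by
// markWord::klass_shift and decoded.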
// atomic update to prevent overwriting Klass* with 0
__ lock();
__ orptr(mdo_addr, TypeEntries::null_seen);
}
if (do_update) {
- #ifndef ASSERT
- __ jmpb(next);
- }
- #else
__ jmp(next);
}
+ #ifdef ASSERT
} else {
__ testptr(obj, obj);
__ jcc(Assembler::notZero, update);
__ stop("unexpect null obj");
#endif
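// jmp(next) is now emitted unconditionally and only the else-branch null check is kept
// under #ifdef ASSERT; the old near jmpb presumably relied on the debug-only code being
// compiled out to keep the jump within short range.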