src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
+ #include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
+ #include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
__ push_reg(opr->as_register_lo());
} else if (opr->is_stack()) {
__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
} else if (opr->is_constant()) {
LIR_Const* const_opr = opr->as_constant_ptr();
- if (const_opr->type() == T_OBJECT) {
+ if (const_opr->type() == T_OBJECT || const_opr->type() == T_PRIMITIVE_OBJECT) {
__ push_oop(const_opr->as_jobject(), rscratch1);
} else if (const_opr->type() == T_INT) {
__ push_jint(const_opr->as_jint());
} else {
ShouldNotReachHere();
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(rax, rbx); // Restore the exception
}
// remove the activation and dispatch to the unwind handler
- __ remove_frame(initial_frame_size_in_bytes());
+ __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
// Emit the slow path assembly
if (stub != NULL) {
stub->emit_code(this);
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
assert(result->fpu() == 0, "result must already be on TOS");
}
+ ciMethod* method = compilation()->method();
+ if (InlineTypeReturnedAsFields && method->return_type()->is_inlinetype()) {
+ ciInlineKlass* vk = method->return_type()->as_inline_klass();
+ if (vk->can_be_returned_as_fields()) {
+ #ifndef _LP64
+ Unimplemented();
+ #else
+ address unpack_handler = vk->unpack_handler();
+ assert(unpack_handler != NULL, "must be");
+ __ call(RuntimeAddress(unpack_handler));
+ // At this point, rax points to the value object (for an interpreter or C1 caller)
+ // and the fields of the object have been copied into registers (for a C2 caller).
+ #endif
+ }
+ }
+
// Pop the stack before the safepoint code
- __ remove_frame(initial_frame_size_in_bytes());
+ __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check();
}
__ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
__ ret(0);
}
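+ // Delegates to the MacroAssembler helper of the same name, which buffers the
+ // fields of a returned inline type into a heap object when needed.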
+ int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
+ return (__ store_inline_type_fields_to_buf(vk, false));
+ }
+
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
guarantee(info != NULL, "Shouldn't be NULL");
int offset = __ offset();
#ifdef _LP64
const Register poll_addr = rscratch1;
__ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
break;
}
+ case T_PRIMITIVE_OBJECT: // Fall through
case T_OBJECT: {
if (patch_code != lir_patch_none) {
jobject2reg_with_patching(dest->as_register(), info);
} else {
__ movoop(dest->as_register(), c->as_jobject());
case T_ADDRESS:
__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
break;
+ case T_PRIMITIVE_OBJECT: // Fall through
case T_OBJECT:
__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
break;
case T_LONG: // fall through
case T_ADDRESS:
__ movptr(as_Address(addr), c->as_jint_bits());
break;
+ case T_PRIMITIVE_OBJECT: // fall through
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
if (UseCompressedOops && !wide) {
__ movl(as_Address(addr), NULL_WORD);
move_regs(src->as_register_lo(), dest->as_register());
return;
}
#endif
assert(src->is_single_cpu(), "must match");
- if (src->type() == T_OBJECT) {
+ if (src->type() == T_OBJECT || src->type() == T_PRIMITIVE_OBJECT) {
__ verify_oop(src->as_register());
}
move_regs(src->as_register(), dest->as_register());
} else if (dest->is_double_cpu()) {
}
#endif // _LP64
break;
}
+ case T_PRIMITIVE_OBJECT: // fall through
case T_ARRAY: // fall through
case T_OBJECT: // fall through
if (UseCompressedOops && !wide) {
__ movl(as_Address(to_addr), compressed_src);
} else {
assert(dest->is_register(), "should not call otherwise");
LIR_Address* addr = src->as_address_ptr();
Address from_addr = as_Address(addr);
- if (addr->base()->type() == T_OBJECT) {
+ if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_PRIMITIVE_OBJECT) {
__ verify_oop(addr->base()->as_pointer_register());
}
switch (type) {
case T_BOOLEAN: // fall through
#endif // !LP64
}
break;
}
+ case T_PRIMITIVE_OBJECT: // fall through
case T_OBJECT: // fall through
case T_ARRAY: // fall through
if (UseCompressedOops && !wide) {
__ movl(dest->as_register(), from_addr);
} else {
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
Register len = op->len()->as_register();
LP64_ONLY( __ movslq(len, len); )
- if (UseSlowPath ||
+ if (UseSlowPath || op->type() == T_PRIMITIVE_OBJECT ||
(!UseFastNewObjectArray && is_reference_type(op->type())) ||
(!UseFastNewTypeArray && !is_reference_type(op->type()))) {
__ jmp(*op->stub()->entry());
} else {
Register tmp1 = op->tmp1()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
assert_different_registers(obj, k_RInfo, klass_RInfo);
- __ cmpptr(obj, NULL_WORD);
- if (op->should_profile()) {
- Label not_null;
- __ jccb(Assembler::notEqual, not_null);
- // Object is null; update MDO and exit
- Register mdo = klass_RInfo;
- __ mov_metadata(mdo, md->constant_encoding());
- Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
- int header_bits = BitData::null_seen_byte_constant();
- __ orb(data_addr, header_bits);
- __ jmp(*obj_is_null);
- __ bind(not_null);
- } else {
- __ jcc(Assembler::equal, *obj_is_null);
+ if (op->need_null_check()) {
+ __ cmpptr(obj, NULL_WORD);
+ if (op->should_profile()) {
+ Label not_null;
+ __ jccb(Assembler::notEqual, not_null);
+ // Object is null; update MDO and exit
+ Register mdo = klass_RInfo;
+ __ mov_metadata(mdo, md->constant_encoding());
+ Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
+ int header_bits = BitData::null_seen_byte_constant();
+ __ orb(data_addr, header_bits);
+ __ jmp(*obj_is_null);
+ __ bind(not_null);
+ } else {
+ __ jcc(Assembler::equal, *obj_is_null);
+ }
}
if (!k->is_loaded()) {
klass2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
ShouldNotReachHere();
}
}
+ void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
+ // We are loading/storing from/to an array that *may* be flattened (the
+ // declared type is Object[], abstract[], interface[] or VT.ref[]).
+ // If this array is flattened, take the slow path.
+ Register klass = op->tmp()->as_register();
+ if (UseArrayMarkWordCheck) {
+ __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+ } else {
+ Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+ __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
+ __ movl(klass, Address(klass, Klass::layout_helper_offset()));
+ __ testl(klass, Klass::_lh_array_tag_flat_value_bit_inplace);
+ __ jcc(Assembler::notZero, *op->stub()->entry());
+ }
+ if (!op->value()->is_illegal()) {
+ // The array is not flattened, but it might be null-free. If we are storing
+ // a null into a null-free array, take the slow path (which will throw NPE).
+ Label skip;
+ __ cmpptr(op->value()->as_register(), NULL_WORD);
+ __ jcc(Assembler::notEqual, skip);
+ if (UseArrayMarkWordCheck) {
+ __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+ } else {
+ __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
+ __ jcc(Assembler::notZero, *op->stub()->entry());
+ }
+ __ bind(skip);
+ }
+ }
+
+ void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
+ // We are storing into an array that *may* be null-free (the declared type is
+ // Object[], abstract[], interface[] or VT.ref[]).
+ if (UseArrayMarkWordCheck) {
+ Label test_mark_word;
+ Register tmp = op->tmp()->as_register();
+ __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
+ __ testl(tmp, markWord::unlocked_value);
+ __ jccb(Assembler::notZero, test_mark_word);
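+ // If the object is locked, the mark word is displaced; read the klass'
+ // prototype header instead, which carries the same array property bits.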
+ __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
+ __ bind(test_mark_word);
+ __ testl(tmp, markWord::null_free_array_bit_in_place);
+ } else {
+ Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+ Register klass = op->tmp()->as_register();
+ __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
+ __ movl(klass, Address(klass, Klass::layout_helper_offset()));
+ __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
+ }
+ }
+
+ void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
+ Label L_oops_equal;
+ Label L_oops_not_equal;
+ Label L_end;
+
+ Register left = op->left()->as_register();
+ Register right = op->right()->as_register();
+
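+ // References to the same object are trivially substitutable.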
+ __ cmpptr(left, right);
+ __ jcc(Assembler::equal, L_oops_equal);
+
+ // (1) Null check -- if one of the operands is null, the other must not be null (because
+ //     the two references are not equal), so they are not substitutable.
+ // FIXME: do null check only if the operand is nullable
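+ // Branch to not_equal if one of the operands is NULL (left & right is zero in that case).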
+ __ testptr(left, right);
+ __ jcc(Assembler::zero, L_oops_not_equal);
+
+ ciKlass* left_klass = op->left_klass();
+ ciKlass* right_klass = op->right_klass();
+
+ // (2) Inline type check -- if either of the operands is not an inline type,
+ //     they are not substitutable. We do this only if we are not sure that both
+ //     operands are inline types.
+ if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
+ !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
+ Register tmp1 = op->tmp1()->as_register();
+ __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
+ __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
+ __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
+ __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
+ __ jcc(Assembler::notEqual, L_oops_not_equal);
+ }
+
+ // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
+ if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
+ // No need to load klass -- the operands are statically known to be the same inline klass.
+ __ jmp(*op->stub()->entry());
+ } else {
+ Register left_klass_op = op->left_klass_op()->as_register();
+ Register right_klass_op = op->right_klass_op()->as_register();
+
+ if (UseCompressedClassPointers) {
+ __ movl(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
+ __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+ __ cmpl(left_klass_op, right_klass_op);
+ } else {
+ __ movptr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
+ __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+ __ cmpptr(left_klass_op, right_klass_op);
+ }
+
+ __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
+ // fall through to L_oops_not_equal
+ }
+
+ __ bind(L_oops_not_equal);
+ move(op->not_equal_result(), op->result_opr());
+ __ jmp(L_end);
+
+ __ bind(L_oops_equal);
+ move(op->equal_result(), op->result_opr());
+ __ jmp(L_end);
+
+ // We've returned from the stub. RAX contains 0x0 iff the two operands are not
+ // substitutable (don't compare against 0x1, in case the C compiler is naughty).
+ __ bind(*op->stub()->continuation());
+ __ cmpl(rax, 0);
+ __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
+ move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
+ // fall-through
+ __ bind(L_end);
+ }
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
} else {
Unimplemented();
}
}
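+ // Materializes src (a register, stack slot, or constant) into the destination register.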
+ void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
+ assert(dst->is_cpu_register(), "must be");
+ assert(dst->type() == src->type(), "must be");
+
+ if (src->is_cpu_register()) {
+ reg2reg(src, dst);
+ } else if (src->is_stack()) {
+ stack2reg(src, dst, dst->type());
+ } else if (src->is_constant()) {
+ const2reg(src, dst, lir_patch_none, NULL);
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
Assembler::Condition acond, ncond;
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ call(AddressLiteral(op->addr(), rtype));
- add_call_info(code_offset(), op->info());
+ add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
__ post_call_nop();
}
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
__ ic_call(op->addr());
- add_call_info(code_offset(), op->info());
+ add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ post_call_nop();
}
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
__ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
}
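+ // Branches to the slow path if the array needs inline type handling: a flattened
+ // source array or a null-free destination array; optionally null-checks the oop first.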
+ void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
+ if (null_check) {
+ __ testptr(obj, obj);
+ __ jcc(Assembler::zero, *slow_path->entry());
+ }
+ if (UseArrayMarkWordCheck) {
+ if (is_dest) {
+ __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
+ } else {
+ __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
+ }
+ } else {
+ Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+ __ load_klass(tmp, obj, tmp_load_klass);
+ __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
+ if (is_dest) {
+ // Take the slow path if the destination array is null-free, in case the source array contains NULLs.
+ __ testl(tmp, Klass::_lh_null_free_array_bit_inplace);
+ } else {
+ __ testl(tmp, Klass::_lh_array_tag_flat_value_bit_inplace);
+ }
+ __ jcc(Assembler::notZero, *slow_path->entry());
+ }
+ }
+
+
// This code replaces a call to arraycopy; no exceptions may
// be thrown in this code; they must be thrown in the System.arraycopy
// activation frame. We could save some checks if this were not the case.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
ciArrayKlass* default_type = op->expected_type();
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (is_reference_type(basic_type)) basic_type = T_OBJECT;
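+ // The copy was flagged as always requiring the slow path; dispatch straight to the stub.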
+ if (flags & LIR_OpArrayCopy::always_slow_path) {
+ __ jmp(*stub->entry());
+ __ bind(*stub->continuation());
+ return;
+ }
+
// if we don't know anything, just go through the generic arraycopy
if (default_type == NULL) {
// save outgoing arguments on stack in case call to System.arraycopy is needed
// HACK ALERT. This code used to push the parameters in a hardwired fashion
// for interpreter calling conventions. Now we have to do it in new style conventions.
__ bind(*stub->continuation());
return;
}
+ // Handle inline type arrays
+ if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
+ arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
+ }
+ if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
+ arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
+ }
+
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
int elem_size = type2aelembytes(basic_type);
Address::ScaleFactor scale;
__ bind(next);
}
}
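+ // Sets 'flag' in the MDO cell at 'mdp' if the profiled object is a non-null inline type.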
+ void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
+ Register obj = op->obj()->as_register();
+ Register tmp = op->tmp()->as_pointer_register();
+ Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
+ bool not_null = op->not_null();
+ int flag = op->flag();
+
+ Label not_inline_type;
+ if (!not_null) {
+ __ testptr(obj, obj);
+ __ jccb(Assembler::zero, not_inline_type);
+ }
+
+ __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
+
+ __ orb(mdo_addr, flag);
+
+ __ bind(not_inline_type);
+ }
+
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
Unimplemented();
}
#else
__ get_thread(result_reg->as_register());
#endif // _LP64
}
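+ // Compares the saved original PC slot of the current frame against NULL.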
+ void LIR_Assembler::check_orig_pc() {
+ __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD);
+ }
void LIR_Assembler::peephole(LIR_List*) {
// do nothing for now
}