src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
+ #include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
+ #include "oops/oop.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"
__ mov(r0, r19); // Restore the exception
}
// remove the activation and dispatch to the unwind handler
__ block_comment("remove_frame and dispatch to the unwind handler");
- __ remove_frame(initial_frame_size_in_bytes());
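+ // needs_stack_repair() covers frames that were extended (presumably to
+ // receive scalarized inline-type arguments) and must restore the caller's SP.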
+ __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
// Emit the slow path assembly
if (stub != NULL) {
stub->emit_code(this);
}
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
+ ciMethod* method = compilation()->method();
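+ // If the return type is an inline type that can be returned as fields,
+ // call its unpack handler to scalarize the buffered value into registers.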
+ if (InlineTypeReturnedAsFields && method->return_type()->is_inlinetype()) {
+ ciInlineKlass* vk = method->return_type()->as_inline_klass();
+ if (vk->can_be_returned_as_fields()) {
+ address unpack_handler = vk->unpack_handler();
+ assert(unpack_handler != NULL, "must be");
+ __ far_call(RuntimeAddress(unpack_handler));
+ // At this point, r0 points to the value object (for an interpreter or C1 caller).
+ // The fields of the object are copied into registers (for a C2 caller).
+ }
+ }
+
// Pop the stack before the safepoint code
- __ remove_frame(initial_frame_size_in_bytes());
+ __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check();
}
__ relocate(relocInfo::poll_return_type);
__ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
__ ret(lr);
}
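+ // Buffer inline-type fields held in registers back into a heap object on
+ // return; delegates to the MacroAssembler and (presumably) returns the call
+ // offset used for debug info at the buffering runtime call.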
+ int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
+ return (__ store_inline_type_fields_to_buf(vk, false));
+ }
+
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
guarantee(info != NULL, "Shouldn't be NULL");
__ get_polling_page(rscratch1, relocInfo::poll_type);
add_debug_info_for_branch(info); // This isn't just debug info:
// it's the oop map
assert(patch_code == lir_patch_none, "no patching handled here");
__ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
break;
}
+ case T_PRIMITIVE_OBJECT:
case T_OBJECT: {
- if (patch_code == lir_patch_none) {
- jobject2reg(c->as_jobject(), dest->as_register());
- } else {
+ if (patch_code != lir_patch_none) {
jobject2reg_with_patching(dest->as_register(), info);
+ } else {
+ jobject2reg(c->as_jobject(), dest->as_register());
}
break;
}
case T_METADATA: {
}
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
LIR_Const* c = src->as_constant_ptr();
switch (c->type()) {
+ case T_PRIMITIVE_OBJECT:
case T_OBJECT:
{
if (! c->as_jobject())
__ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
else {
break;
case T_INT:
assert(c->as_jint() == 0, "should be");
insn = &Assembler::strw;
break;
+ case T_PRIMITIVE_OBJECT:
case T_OBJECT:
case T_ARRAY:
+ // The non-null case is not handled on aarch64, but is handled on x86.
+ // FIXME: do we need to add it here?
assert(c->as_jobject() == 0, "should be");
if (UseCompressedOops && !wide) {
insn = &Assembler::strw;
} else {
insn = &Assembler::str;
// Can do LONG -> OBJECT
move_regs(src->as_register_lo(), dest->as_register());
return;
}
assert(src->is_single_cpu(), "must match");
- if (src->type() == T_OBJECT) {
+ if (src->type() == T_OBJECT || src->type() == T_PRIMITIVE_OBJECT) {
__ verify_oop(src->as_register());
}
move_regs(src->as_register(), dest->as_register());
} else if (dest->is_double_cpu()) {
case T_DOUBLE: {
__ strd(src->as_double_reg(), as_Address(to_addr));
break;
}
+ case T_PRIMITIVE_OBJECT: // fall through
case T_ARRAY: // fall through
case T_OBJECT: // fall through
if (UseCompressedOops && !wide) {
__ strw(compressed_src, as_Address(to_addr, rscratch2));
} else {
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
LIR_Address* addr = src->as_address_ptr();
LIR_Address* from_addr = src->as_address_ptr();
- if (addr->base()->type() == T_OBJECT) {
+ if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_PRIMITIVE_OBJECT) {
__ verify_oop(addr->base()->as_pointer_register());
}
if (patch_code != lir_patch_none) {
deoptimize_trap(info);
case T_DOUBLE: {
__ ldrd(dest->as_double_reg(), as_Address(from_addr));
break;
}
+ case T_PRIMITIVE_OBJECT: // fall through
case T_ARRAY: // fall through
case T_OBJECT: // fall through
if (UseCompressedOops && !wide) {
__ ldrw(dest->as_register(), as_Address(from_addr));
} else {
__ verify_oop(dest->as_register());
}
}
}
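+ // Generic move into a CPU register, dispatching on whether the source is a
+ // register, a stack slot, or a constant.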
+ void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
+ assert(dst->is_cpu_register(), "must be");
+ assert(dst->type() == src->type(), "must be");
+
+ if (src->is_cpu_register()) {
+ reg2reg(src, dst);
+ } else if (src->is_stack()) {
+ stack2reg(src, dst, dst->type());
+ } else if (src->is_constant()) {
+ const2reg(src, dst, lir_patch_none, NULL);
+ } else {
+ ShouldNotReachHere();
+ }
+ }
int LIR_Assembler::array_element_size(BasicType type) const {
int elem_size = type2aelembytes(type);
return exact_log2(elem_size);
}
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
Register len = op->len()->as_register();
__ uxtw(len, len);
- if (UseSlowPath ||
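+ // Inline-type (T_PRIMITIVE_OBJECT) arrays always take the slow path,
+ // presumably because their elements must be initialized to the default
+ // value rather than simply zeroed.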
+ if (UseSlowPath || op->type() == T_PRIMITIVE_OBJECT ||
(!UseFastNewObjectArray && is_reference_type(op->type())) ||
(!UseFastNewTypeArray && !is_reference_type(op->type()))) {
__ b(*op->stub()->entry());
} else {
Register tmp1 = op->tmp1()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
assert_different_registers(obj, k_RInfo, klass_RInfo);
+ if (op->need_null_check()) {
if (should_profile) {
Label not_null;
__ cbnz(obj, not_null);
// Object is null; update MDO and exit
Register mdo = klass_RInfo;
__ b(*obj_is_null);
__ bind(not_null);
} else {
__ cbz(obj, *obj_is_null);
}
+ }
if (!k->is_loaded()) {
klass2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
__ mov_metadata(k_RInfo, k->constant_encoding());
} else {
ShouldNotReachHere();
}
}
+ void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
+ // We are loading/storing from/to an array that *may* be flattened (the
+ // declared type is Object[], abstract[], interface[] or VT.ref[]).
+ // If this array is flattened, take the slow path.
+
+ Register klass = op->tmp()->as_register();
+ if (UseArrayMarkWordCheck) {
+ __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+ } else {
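+ // Without mark-word checks, read the layout helper from the klass and test
+ // the flat-array tag bit.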
+ __ load_klass(klass, op->array()->as_register());
+ __ ldrw(klass, Address(klass, Klass::layout_helper_offset()));
+ __ tst(klass, Klass::_lh_array_tag_flat_value_bit_inplace);
+ __ br(Assembler::NE, *op->stub()->entry());
+ }
+ if (!op->value()->is_illegal()) {
+ // The array is not flattened, but it might be null-free. If we are storing
+ // a null into a null-free array, take the slow path (which will throw NPE).
+ Label skip;
+ __ cbnz(op->value()->as_register(), skip);
+ if (UseArrayMarkWordCheck) {
+ __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+ } else {
+ __ tst(klass, Klass::_lh_null_free_array_bit_inplace);
+ __ br(Assembler::NE, *op->stub()->entry());
+ }
+ __ bind(skip);
+ }
+ }
+
+ void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
+ // We are storing into an array that *may* be null-free (the declared type is
+ // Object[], abstract[], interface[] or VT.ref[]).
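+ // Both paths below only set the condition flags with tst; the caller is
+ // responsible for emitting the branch on the result.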
+ if (UseArrayMarkWordCheck) {
+ Label test_mark_word;
+ Register tmp = op->tmp()->as_register();
+ __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
+ __ tst(tmp, markWord::unlocked_value);
+ __ br(Assembler::NE, test_mark_word);
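+ // The unlocked bit is clear, so the mark word is displaced and does not
+ // carry the array property bits; read the klass' prototype header instead.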
+ __ load_prototype_header(tmp, op->array()->as_register());
+ __ bind(test_mark_word);
+ __ tst(tmp, markWord::null_free_array_bit_in_place);
+ } else {
+ Register klass = op->tmp()->as_register();
+ __ load_klass(klass, op->array()->as_register());
+ __ ldrw(klass, Address(klass, Klass::layout_helper_offset()));
+ __ tst(klass, Klass::_lh_null_free_array_bit_inplace);
+ }
+ }
+
+ void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
+ Label L_oops_equal;
+ Label L_oops_not_equal;
+ Label L_end;
+
+ Register left = op->left()->as_register();
+ Register right = op->right()->as_register();
+
+ __ cmp(left, right);
+ __ br(Assembler::EQ, L_oops_equal);
+
+ // (1) Null check -- if one of the operands is null, the other must not be null (because
+ // the two references are not equal), so they are not substitutable.
+ // FIXME: do null check only if the operand is nullable
+ {
+ __ cbz(left, L_oops_not_equal);
+ __ cbz(right, L_oops_not_equal);
+ }
+
+ ciKlass* left_klass = op->left_klass();
+ ciKlass* right_klass = op->right_klass();
+
+ // (2) Inline type check -- if either of the operands is not an inline type,
+ // they are not substitutable. We do this only if we are not sure that the
+ // operands are inline types.
+ if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
+ !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
+ Register tmp1 = op->tmp1()->as_register();
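+ // AND the mark words of both operands: the inline-type pattern survives the
+ // AND only if both objects are inline types.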
+ __ mov(tmp1, markWord::inline_type_pattern);
+ __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
+ __ andr(tmp1, tmp1, rscratch1);
+ __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
+ __ andr(tmp1, tmp1, rscratch1);
+ __ cmp(tmp1, (u1)markWord::inline_type_pattern);
+ __ br(Assembler::NE, L_oops_not_equal);
+ }
+
+ // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
+ if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
+ // No need to load klass -- the operands are statically known to be the same inline klass.
+ __ b(*op->stub()->entry());
+ } else {
+ Register left_klass_op = op->left_klass_op()->as_register();
+ Register right_klass_op = op->right_klass_op()->as_register();
+
+ if (UseCompressedClassPointers) {
+ __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
+ __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+ __ cmpw(left_klass_op, right_klass_op);
+ } else {
+ __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
+ __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+ __ cmp(left_klass_op, right_klass_op);
+ }
+
+ __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
+ // fall through to L_oops_not_equal
+ }
+
+ __ bind(L_oops_not_equal);
+ move(op->not_equal_result(), op->result_opr());
+ __ b(L_end);
+
+ __ bind(L_oops_equal);
+ move(op->equal_result(), op->result_opr());
+ __ b(L_end);
+
+ // We've returned from the stub. R0 contains 0x0 IFF the two
+ // operands are not substitutable. (Don't compare against 0x1 in case the
+ // C compiler is naughty)
+ __ bind(*op->stub()->continuation());
+ __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
+ move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
+ // fall-through
+ __ bind(L_end);
+ }
+
+
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
__ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
__ cset(rscratch1, Assembler::NE);
__ membar(__ AnyAny);
}
imm = opr2->as_constant_ptr()->as_jint();
break;
case T_METADATA:
imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
break;
+ case T_PRIMITIVE_OBJECT:
case T_OBJECT:
case T_ARRAY:
jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
__ cmpoop(reg1, rscratch1);
return;
address call = __ trampoline_call(Address(op->addr(), rtype));
if (call == NULL) {
bailout("trampoline stub overflow");
return;
}
- add_call_info(code_offset(), op->info());
+ add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
__ post_call_nop();
}
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
address call = __ ic_call(op->addr());
if (call == NULL) {
bailout("trampoline stub overflow");
return;
}
- add_call_info(code_offset(), op->info());
+ add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
__ post_call_nop();
}
void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
ShouldNotReachHere();
break;
}
break;
case T_LONG:
+ case T_PRIMITIVE_OBJECT:
case T_ADDRESS:
case T_OBJECT:
switch (code) {
case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;
case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;
break;
}
break;
case T_LONG:
case T_ADDRESS:
+ case T_PRIMITIVE_OBJECT:
case T_OBJECT:
switch (code) {
case lir_shl: __ lsl (dreg, lreg, count); break;
case lir_shr: __ asr (dreg, lreg, count); break;
case lir_ushr: __ lsr (dreg, lreg, count); break;
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
__ lea(rscratch1, __ constant_oop_address(o));
__ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
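+ // Branch to the arraycopy slow path if obj is a flattened source array or a
+ // null-free destination array, optionally null-checking obj first.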
+ void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
+ if (null_check) {
+ __ cbz(obj, *slow_path->entry());
+ }
+ if (UseArrayMarkWordCheck) {
+ if (is_dest) {
+ __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
+ } else {
+ __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
+ }
+ } else {
+ __ load_klass(tmp, obj);
+ __ ldr(tmp, Address(tmp, Klass::layout_helper_offset()));
+ if (is_dest) {
+ // Take the slow path if it's a null-free destination array, in case the source array contains NULLs.
+ __ tst(tmp, Klass::_lh_null_free_array_bit_inplace);
+ } else {
+ __ tst(tmp, Klass::_lh_array_tag_flat_value_bit_inplace);
+ }
+ __ br(Assembler::NE, *slow_path->entry());
+ }
+ }
// This code replaces a call to arraycopy; no exceptions may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (is_reference_type(basic_type)) basic_type = T_OBJECT;
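+ // The LIR generator sets always_slow_path when the copy must be performed
+ // entirely in the runtime (presumably copies involving flat inline-type
+ // arrays); nothing can be done inline in that case.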
+ if (flags & LIR_OpArrayCopy::always_slow_path) {
+ __ b(*stub->entry());
+ __ bind(*stub->continuation());
+ return;
+ }
+
// if we don't know anything, just go through the generic arraycopy
if (default_type == NULL // || basic_type == T_OBJECT
) {
Label done;
assert(src == r1 && src_pos == r2, "mismatch in calling convention");
__ bind(*stub->continuation());
return;
}
+ // Handle inline type arrays
+ if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
+ arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
+ }
+
+ if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
+ arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
+ }
+
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
int elem_size = type2aelembytes(basic_type);
int scale = exact_log2(elem_size);
__ bind(next);
}
COMMENT("} emit_profile_type");
}
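+ // Profile whether obj is an inline type by OR-ing the given flag into the
+ // MDO cell; null objects and non-inline types skip the update.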
+ void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
+ Register obj = op->obj()->as_register();
+ Register tmp = op->tmp()->as_pointer_register();
+ bool not_null = op->not_null();
+ int flag = op->flag();
+
+ Label not_inline_type;
+ if (!not_null) {
+ __ cbz(obj, not_inline_type);
+ }
+
+ __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
+
+ Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
+ __ ldrb(rscratch1, mdo_addr);
+ __ orr(rscratch1, rscratch1, flag);
+ __ strb(rscratch1, mdo_addr);
+
+ __ bind(not_inline_type);
+ }
void LIR_Assembler::align_backward_branch_target() {
}
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
__ mov(result_reg->as_register(), rthread);
}
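+ // Load the orig_pc slot of the frame and compare it against NULL_WORD; a
+ // non-null value presumably marks a frame patched for deoptimization.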
+ void LIR_Assembler::check_orig_pc() {
+ __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
+ __ cmp(rscratch2, (u1)NULL_WORD);
+ }
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
if (tableswitch_count >= max_tableswitches)
return;
break;
case T_LONG:
xchg = &MacroAssembler::atomic_xchgal;
add = &MacroAssembler::atomic_addal;
break;
+ case T_PRIMITIVE_OBJECT:
case T_OBJECT:
case T_ARRAY:
if (UseCompressedOops) {
xchg = &MacroAssembler::atomic_xchgalw;
add = &MacroAssembler::atomic_addalw;