src/hotspot/share/c1/c1_LIR.hpp
class LIR_OpRTCall;
class LIR_OpArrayCopy;
class LIR_OpUpdateCRC32;
class LIR_OpLock;
class LIR_OpTypeCheck;
+ class LIR_OpFlattenedArrayCheck;
+ class LIR_OpNullFreeArrayCheck;
+ class LIR_OpSubstitutabilityCheck;
class LIR_OpCompareAndSwap;
class LIR_OpLoadKlass;
class LIR_OpProfileCall;
class LIR_OpProfileType;
+ class LIR_OpProfileInlineType;
#ifdef ASSERT
class LIR_OpAssert;
#endif
// LIR operation codes
, lir_membar_storestore
, lir_membar_loadstore
, lir_membar_storeload
, lir_get_thread
, lir_on_spin_wait
+ , lir_check_orig_pc
, end_op0
, begin_op1
, lir_fxch
, lir_fld
, lir_push
, begin_opTypeCheck
, lir_instanceof
, lir_checkcast
, lir_store_check
, end_opTypeCheck
+ , begin_opFlattenedArrayCheck
+ , lir_flat_array_check
+ , end_opFlattenedArrayCheck
+ , begin_opNullFreeArrayCheck
+ , lir_null_free_array_check
+ , end_opNullFreeArrayCheck
+ , begin_opSubstitutabilityCheck
+ , lir_substitutability_check
+ , end_opSubstitutabilityCheck
, begin_opCompareAndSwap
, lir_cas_long
, lir_cas_obj
, lir_cas_int
, end_opCompareAndSwap
, begin_opMDOProfile
, lir_profile_call
, lir_profile_type
+ , lir_profile_inline_type
, end_opMDOProfile
, begin_opAssert
, lir_assert
, end_opAssert
#ifdef INCLUDE_ZGC
virtual LIR_Op3* as_Op3() { return nullptr; }
virtual LIR_Op4* as_Op4() { return nullptr; }
virtual LIR_OpArrayCopy* as_OpArrayCopy() { return nullptr; }
virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return nullptr; }
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return nullptr; }
+ virtual LIR_OpFlattenedArrayCheck* as_OpFlattenedArrayCheck() { return nullptr; }
+ virtual LIR_OpNullFreeArrayCheck* as_OpNullFreeArrayCheck() { return nullptr; }
+ virtual LIR_OpSubstitutabilityCheck* as_OpSubstitutabilityCheck() { return nullptr; }
virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return nullptr; }
virtual LIR_OpLoadKlass* as_OpLoadKlass() { return nullptr; }
virtual LIR_OpProfileCall* as_OpProfileCall() { return nullptr; }
virtual LIR_OpProfileType* as_OpProfileType() { return nullptr; }
+ virtual LIR_OpProfileInlineType* as_OpProfileInlineType() { return nullptr; }
#ifdef ASSERT
virtual LIR_OpAssert* as_OpAssert() { return nullptr; }
#endif
virtual void verify() const {}
};
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+
+ bool maybe_return_as_fields(ciInlineKlass** vk = nullptr) const;
};
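A minimal usage sketch for the new maybe_return_as_fields() query, assuming the usual Valhalla convention that it reports whether the callee's inline-type result may be returned scalarized; `call` (a LIR_OpJavaCall*) and the surrounding logic are illustrative, not part of this diff:

ciInlineKlass* vk = nullptr;
if (call->maybe_return_as_fields(&vk)) {
  // Assumption: the result may come back as scalarized fields; vk (if set) describes them.
}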
// --------------------------------------------------
// LIR_OpLabel
// --------------------------------------------------
type_check = 1 << 7,
overlapping = 1 << 8,
unaligned = 1 << 9,
src_objarray = 1 << 10,
dst_objarray = 1 << 11,
! all_flags = (1 << 12) - 1
};
LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
type_check = 1 << 7,
overlapping = 1 << 8,
unaligned = 1 << 9,
src_objarray = 1 << 10,
dst_objarray = 1 << 11,
! always_slow_path = 1 << 12,
+ src_inlinetype_check = 1 << 13,
+ dst_inlinetype_check = 1 << 14,
+ all_flags = (1 << 15) - 1
};
LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
CodeEmitInfo* _info_for_exception;
CodeStub* _stub;
ciMethod* _profiled_method;
int _profiled_bci;
bool _should_profile;
public:
LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
! CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
LIR_Opr object() const { return _object; }
LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; }
CodeEmitInfo* _info_for_exception;
CodeStub* _stub;
ciMethod* _profiled_method;
int _profiled_bci;
bool _should_profile;
+ bool _need_null_check;
public:
LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
! CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, bool need_null_check = true);
LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
LIR_Opr object() const { return _object; }
LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; }
void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_should_profile(bool b) { _should_profile = b; }
ciMethod* profiled_method() const { return _profiled_method; }
int profiled_bci() const { return _profiled_bci; }
bool should_profile() const { return _should_profile; }
!
virtual bool is_patching() { return _info_for_patch != nullptr; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
void print_instr(outputStream* out) const PRODUCT_RETURN;
};
// LIR_Op2
class LIR_Op2: public LIR_Op {
friend class LIR_OpVisitState;
int _fpu_stack_size; // for sin/cos implementation on Intel
void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_should_profile(bool b) { _should_profile = b; }
ciMethod* profiled_method() const { return _profiled_method; }
int profiled_bci() const { return _profiled_bci; }
bool should_profile() const { return _should_profile; }
! bool need_null_check() const { return _need_null_check; }
virtual bool is_patching() { return _info_for_patch != nullptr; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
void print_instr(outputStream* out) const PRODUCT_RETURN;
};
+ // LIR_OpFlattenedArrayCheck
+ class LIR_OpFlattenedArrayCheck: public LIR_Op {
+ friend class LIR_OpVisitState;
+
+ private:
+ LIR_Opr _array;
+ LIR_Opr _value;
+ LIR_Opr _tmp;
+ CodeStub* _stub;
+ public:
+ LIR_OpFlattenedArrayCheck(LIR_Opr array, LIR_Opr value, LIR_Opr tmp, CodeStub* stub);
+ LIR_Opr array() const { return _array; }
+ LIR_Opr value() const { return _value; }
+ LIR_Opr tmp() const { return _tmp; }
+ CodeStub* stub() const { return _stub; }
+
+ virtual void emit_code(LIR_Assembler* masm);
+ virtual LIR_OpFlattenedArrayCheck* as_OpFlattenedArrayCheck() { return this; }
+ virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+ };
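The matching LIR_List helper check_flat_array() declared further down presumably just wraps this op; a sketch of what the out-of-line definition in c1_LIR.cpp could look like, assuming the usual append(new ...) pattern used by the other helpers:

void LIR_List::check_flat_array(LIR_Opr array, LIR_Opr value, LIR_Opr tmp, CodeStub* stub) {
  // Append a flat-array check; the stub is the slow path taken when the array is flat.
  append(new LIR_OpFlattenedArrayCheck(array, value, tmp, stub));
}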
+
+ // LIR_OpNullFreeArrayCheck
+ class LIR_OpNullFreeArrayCheck: public LIR_Op {
+ friend class LIR_OpVisitState;
+
+ private:
+ LIR_Opr _array;
+ LIR_Opr _tmp;
+ public:
+ LIR_OpNullFreeArrayCheck(LIR_Opr array, LIR_Opr tmp);
+ LIR_Opr array() const { return _array; }
+ LIR_Opr tmp() const { return _tmp; }
+
+ virtual void emit_code(LIR_Assembler* masm);
+ virtual LIR_OpNullFreeArrayCheck* as_OpNullFreeArrayCheck() { return this; }
+ virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+ };
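Likewise, a sketch of check_null_free_array() under the same assumption that it only constructs and appends the op:

void LIR_List::check_null_free_array(LIR_Opr array, LIR_Opr tmp) {
  // Append a null-free-array check on the array operand; tmp is a scratch register.
  append(new LIR_OpNullFreeArrayCheck(array, tmp));
}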
+
+ class LIR_OpSubstitutabilityCheck: public LIR_Op {
+ friend class LIR_OpVisitState;
+
+ private:
+ LIR_Opr _left;
+ LIR_Opr _right;
+ LIR_Opr _equal_result;
+ LIR_Opr _not_equal_result;
+ LIR_Opr _tmp1;
+ LIR_Opr _tmp2;
+ ciKlass* _left_klass;
+ ciKlass* _right_klass;
+ LIR_Opr _left_klass_op;
+ LIR_Opr _right_klass_op;
+ CodeStub* _stub;
+ public:
+ LIR_OpSubstitutabilityCheck(LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr equal_result, LIR_Opr not_equal_result,
+ LIR_Opr tmp1, LIR_Opr tmp2,
+ ciKlass* left_klass, ciKlass* right_klass, LIR_Opr left_klass_op, LIR_Opr right_klass_op,
+ CodeEmitInfo* info, CodeStub* stub);
+
+ LIR_Opr left() const { return _left; }
+ LIR_Opr right() const { return _right; }
+ LIR_Opr equal_result() const { return _equal_result; }
+ LIR_Opr not_equal_result() const { return _not_equal_result; }
+ LIR_Opr tmp1() const { return _tmp1; }
+ LIR_Opr tmp2() const { return _tmp2; }
+ ciKlass* left_klass() const { return _left_klass; }
+ ciKlass* right_klass() const { return _right_klass; }
+ LIR_Opr left_klass_op() const { return _left_klass_op; }
+ LIR_Opr right_klass_op() const { return _right_klass_op; }
+ CodeStub* stub() const { return _stub; }
+
+ virtual void emit_code(LIR_Assembler* masm);
+ virtual LIR_OpSubstitutabilityCheck* as_OpSubstitutabilityCheck() { return this; }
+ virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+ };
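And the corresponding substitutability_check() helper declared below would, under the same assumption, simply forward its arguments to the op's constructor:

void LIR_List::substitutability_check(LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr equal_result, LIR_Opr not_equal_result,
                                      LIR_Opr tmp1, LIR_Opr tmp2,
                                      ciKlass* left_klass, ciKlass* right_klass, LIR_Opr left_klass_op, LIR_Opr right_klass_op,
                                      CodeEmitInfo* info, CodeStub* stub) {
  append(new LIR_OpSubstitutabilityCheck(result, left, right, equal_result, not_equal_result,
                                         tmp1, tmp2, left_klass, right_klass, left_klass_op, right_klass_op,
                                         info, stub));
}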
+
// LIR_Op2
class LIR_Op2: public LIR_Op {
friend class LIR_OpVisitState;
int _fpu_stack_size; // for sin/cos implementation on Intel
LIR_Opr _tmp3;
LIR_Opr _tmp4;
BasicType _type;
CodeStub* _stub;
bool _zero_array;
public:
! LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub, bool zero_array)
: LIR_Op(lir_alloc_array, result, nullptr)
, _klass(klass)
, _len(len)
, _tmp1(t1)
, _tmp2(t2)
, _tmp3(t3)
, _tmp4(t4)
, _type(type)
, _stub(stub)
! , _zero_array(zero_array) {}
LIR_Opr klass() const { return _klass; }
LIR_Opr len() const { return _len; }
LIR_Opr obj() const { return result_opr(); }
LIR_Opr tmp1() const { return _tmp1; }
LIR_Opr tmp2() const { return _tmp2; }
LIR_Opr tmp3() const { return _tmp3; }
LIR_Opr tmp4() const { return _tmp4; }
BasicType type() const { return _type; }
CodeStub* stub() const { return _stub; }
! bool zero_array() const { return _zero_array; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
LIR_Opr _tmp3;
LIR_Opr _tmp4;
BasicType _type;
CodeStub* _stub;
bool _zero_array;
+ bool _is_null_free;
public:
! LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub, bool zero_array, bool is_null_free)
: LIR_Op(lir_alloc_array, result, nullptr)
, _klass(klass)
, _len(len)
, _tmp1(t1)
, _tmp2(t2)
, _tmp3(t3)
, _tmp4(t4)
, _type(type)
, _stub(stub)
! , _zero_array(zero_array)
+ , _is_null_free(is_null_free) {}
LIR_Opr klass() const { return _klass; }
LIR_Opr len() const { return _len; }
LIR_Opr obj() const { return result_opr(); }
LIR_Opr tmp1() const { return _tmp1; }
LIR_Opr tmp2() const { return _tmp2; }
LIR_Opr tmp3() const { return _tmp3; }
LIR_Opr tmp4() const { return _tmp4; }
BasicType type() const { return _type; }
CodeStub* stub() const { return _stub; }
! bool zero_array() const { return _zero_array; }
+ bool is_null_free() const { return _is_null_free; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
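A sketch of how the widened allocate_array() entry point (declared further down) might thread the new flag through; the body is assumed to mirror the existing pattern and is not part of this diff:

void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
                              BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array, bool is_null_free) {
  // dst is also the result operand of the op; the two booleans are simply stored on the op.
  append(new LIR_OpAllocArray(klass, len, dst, t1, t2, t3, t4, type, stub, zero_array, is_null_free));
}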
LIR_Opr _hdr;
LIR_Opr _obj;
LIR_Opr _lock;
LIR_Opr _scratch;
CodeStub* _stub;
public:
! LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
: LIR_Op(code, LIR_OprFact::illegalOpr, info)
, _hdr(hdr)
, _obj(obj)
, _lock(lock)
, _scratch(scratch)
! , _stub(stub) {}
LIR_Opr hdr_opr() const { return _hdr; }
LIR_Opr obj_opr() const { return _obj; }
LIR_Opr lock_opr() const { return _lock; }
LIR_Opr scratch_opr() const { return _scratch; }
CodeStub* stub() const { return _stub; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpLock* as_OpLock() { return this; }
void print_instr(outputStream* out) const PRODUCT_RETURN;
};
LIR_Opr _hdr;
LIR_Opr _obj;
LIR_Opr _lock;
LIR_Opr _scratch;
CodeStub* _stub;
+ CodeStub* _throw_ie_stub;
public:
! LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_ie_stub=nullptr)
: LIR_Op(code, LIR_OprFact::illegalOpr, info)
, _hdr(hdr)
, _obj(obj)
, _lock(lock)
, _scratch(scratch)
! , _stub(stub)
+ , _throw_ie_stub(throw_ie_stub) {}
LIR_Opr hdr_opr() const { return _hdr; }
LIR_Opr obj_opr() const { return _obj; }
LIR_Opr lock_opr() const { return _lock; }
LIR_Opr scratch_opr() const { return _scratch; }
CodeStub* stub() const { return _stub; }
+ CodeStub* throw_ie_stub() const { return _throw_ie_stub; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpLock* as_OpLock() { return this; }
void print_instr(outputStream* out) const PRODUCT_RETURN;
};
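Similarly, lock_object() (declared further down) presumably just passes the optional throw_ie_stub along when building the op; a hedged sketch:

void LIR_List::lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_ie_stub) {
  // Assumption: throw_ie_stub stays nullptr unless the caller needs the failure path for locking on a value object.
  append(new LIR_OpLock(lir_lock, hdr, obj, lock, scratch, stub, info, throw_ie_stub));
}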
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpProfileType* as_OpProfileType() { return this; }
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
+ // LIR_OpProfileInlineType
+ class LIR_OpProfileInlineType : public LIR_Op {
+ friend class LIR_OpVisitState;
+
+ private:
+ LIR_Opr _mdp;
+ LIR_Opr _obj;
+ int _flag;
+ LIR_Opr _tmp;
+ bool _not_null; // true if we know statically that _obj cannot be null
+
+ public:
+ // Destroys recv
+ LIR_OpProfileInlineType(LIR_Opr mdp, LIR_Opr obj, int flag, LIR_Opr tmp, bool not_null)
+ : LIR_Op(lir_profile_inline_type, LIR_OprFact::illegalOpr, nullptr) // no result, no info
+ , _mdp(mdp)
+ , _obj(obj)
+ , _flag(flag)
+ , _tmp(tmp)
+ , _not_null(not_null) { }
+
+ LIR_Opr mdp() const { return _mdp; }
+ LIR_Opr obj() const { return _obj; }
+ int flag() const { return _flag; }
+ LIR_Opr tmp() const { return _tmp; }
+ bool not_null() const { return _not_null; }
+
+ virtual void emit_code(LIR_Assembler* masm);
+ virtual LIR_OpProfileInlineType* as_OpProfileInlineType() { return this; }
+ virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+ };
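profile_inline_type() on LIR_List (shown further down in this diff) wraps this op the same way profile_type() wraps LIR_OpProfileType. A hypothetical call site from the LIR generator; mdo, data_offset, obj and flag are illustrative values, new_register() is the LIRGenerator helper, and __ is the usual LIR_List shorthand:

LIR_Opr tmp = new_register(T_INT);                             // illustrative scratch operand
LIR_Address* mdp = new LIR_Address(mdo, data_offset, T_BYTE);  // illustrative MDO slot holding the flag
__ profile_inline_type(mdp, obj, flag, tmp, /*not_null*/ false);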
+
class LIR_InsertionBuffer;
//--------------------------------LIR_List---------------------------------------------------
// Maintains a list of LIR instructions (one instance of LIR_List per basic block)
// The LIR instructions are appended by the LIR_List class itself;
void idiv(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
void irem(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
! void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array = true);
// jump is an unconditional branch
void jump(BlockBegin* block) {
append(new LIR_OpBranch(lir_cond_always, block));
}
void idiv(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
void irem(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
! void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array = true, bool is_null_free = false);
// jump is an unconditional branch
void jump(BlockBegin* block) {
append(new LIR_OpBranch(lir_cond_always, block));
}
append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
}
void load_stack_address_monitor(int monitor_ix, LIR_Opr dst) { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
! void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
void breakpoint() { append(new LIR_Op0(lir_breakpoint)); }
void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) { append(new LIR_OpUpdateCRC32(crc, val, res)); }
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
! ciMethod* profiled_method, int profiled_bci);
// MethodData* profiling
void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
}
void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
}
void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
void load_klass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info) { append(new LIR_OpLoadKlass(obj, result, info)); }
append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
}
void load_stack_address_monitor(int monitor_ix, LIR_Opr dst) { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
! void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_ie_stub=nullptr);
void breakpoint() { append(new LIR_Op0(lir_breakpoint)); }
void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) { append(new LIR_OpUpdateCRC32(crc, val, res)); }
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
+ void check_flat_array(LIR_Opr array, LIR_Opr value, LIR_Opr tmp, CodeStub* stub);
+ void check_null_free_array(LIR_Opr array, LIR_Opr tmp);
+ void substitutability_check(LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr equal_result, LIR_Opr not_equal_result,
+ LIR_Opr tmp1, LIR_Opr tmp2,
+ ciKlass* left_klass, ciKlass* right_klass, LIR_Opr left_klass_op, LIR_Opr right_klass_op,
+ CodeEmitInfo* info, CodeStub* stub);
void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
! ciMethod* profiled_method, int profiled_bci, bool is_null_free);
// MethodData* profiling
void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
}
void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
}
+ void profile_inline_type(LIR_Address* mdp, LIR_Opr obj, int flag, LIR_Opr tmp, bool not_null) {
+ append(new LIR_OpProfileInlineType(LIR_OprFact::address(mdp), obj, flag, tmp, not_null));
+ }
void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
void load_klass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info) { append(new LIR_OpLoadKlass(obj, result, info)); }