< prev index next > src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
Print this page
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
+ #include "ci/ciInlineKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
set_vreg_flag(reg, LIRGenerator::byte_reg);
return reg;
}
+ // Allocate the scratch operands used when emitting a substitutability
+ // (value-object `==`) check. tmp1 is a fresh T_INT virtual register;
+ // tmp2 is set to illegalOpr — presumably a second temp is not needed
+ // on this platform (other ports may use both). NOTE(review): confirm
+ // against the shared substitutability_check() emitter's expectations.
+ void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
+ tmp1 = new_register(T_INT);
+ tmp2 = LIR_OprFact::illegalOpr;
+ }
+
+
//--------- loading items into registers --------------------------------
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
if (v->type()->as_IntConstant() != nullptr) {
CodeEmitInfo* info_for_exception = nullptr;
if (x->needs_null_check()) {
info_for_exception = state_for(x);
}
+
+ CodeStub* throw_ie_stub =
+ x->maybe_inlinetype() ?
+ new SimpleExceptionStub(C1StubId::throw_identity_exception_id, obj.result(), state_for(x)) :
+ nullptr;
+
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
- x->monitor_no(), info_for_exception, info);
+ x->monitor_no(), info_for_exception, info, throw_ie_stub);
}
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
assert(x->is_pinned(),"");
#ifndef PRODUCT
if (PrintNotLoaded && !x->klass()->is_loaded()) {
tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
}
#endif
- CodeEmitInfo* info = state_for(x, x->state());
+ CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
LIR_Opr reg = result_register_for(x->type());
new_instance(reg, x->klass(), x->is_unresolved(),
- FrameMap::r10_oop_opr,
- FrameMap::r11_oop_opr,
- FrameMap::r4_oop_opr,
- LIR_OprFact::illegalOpr,
- FrameMap::r3_metadata_opr, info);
+ /* allow_inline */ false,
+ FrameMap::r10_oop_opr,
+ FrameMap::r11_oop_opr,
+ FrameMap::r4_oop_opr,
+ LIR_OprFact::illegalOpr,
+ FrameMap::r3_metadata_opr, info);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
length.load_item_force(FrameMap::r19_opr);
LIR_Opr len = length.result();
- CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
- ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
+ ciKlass* obj = (ciKlass*) x->exact_type();
+ CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
+
klass2reg_with_patching(klass_reg, obj, patching_info);
- __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
+ __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free());
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
// info for exceptions
CodeEmitInfo* info_for_exception =
(x->needs_exception_state() ? state_for(x) :
state_for(x, x->state_before(), true /*ignore_xhandler*/));
+ if (x->is_null_free()) {
+ __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
+ }
CodeStub* stub;
if (x->is_incompatible_class_change_check()) {
assert(patching_info == nullptr, "can't patch this");
stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
+
+
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
- x->profiled_method(), x->profiled_bci());
+ x->profiled_method(), x->profiled_bci(), x->is_null_free());
+
}
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
LIRItem obj(x->obj(), this);
increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
}
- __ cmp(lir_cond(cond), left, right);
+ if (x->substitutability_check()) {
+ substitutability_check(x, *xin, *yin);
+ } else {
+ __ cmp(lir_cond(cond), left, right);
+ }
+
// Generate branch profiling. Profiling code doesn't kill flags.
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
__ branch(lir_cond(cond), x->tsux(), x->usux());
< prev index next >