src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
+ #include "ci/ciInlineKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
set_vreg_flag(reg, LIRGenerator::byte_reg);
return reg;
}
+ void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
+ tmp1 = new_register(T_INT);
+ tmp2 = LIR_OprFact::illegalOpr;
+ }
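On AArch64 the substitutability check needs only one integer temp, so the second slot of this platform hook is left illegal. A minimal sketch of how the shared LIRGenerator presumably consumes the hook (the call-site shape is an assumption; only substitutability_check itself appears later in this patch):

    LIR_Opr tmp1, tmp2;
    init_temps_for_substitutability_check(tmp1, tmp2);
    // tmp1 is a fresh T_INT register, tmp2 is illegalOpr on aarch64; both are
    // then handed to the acmp substitutability code, which may call into the
    // runtime when either operand can be an inline type.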
+
//--------- loading items into registers --------------------------------
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
if (v->type()->as_IntConstant() != NULL) {
set_no_result(x);
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
CodeEmitInfo* info_for_exception = NULL;
if (x->needs_null_check()) {
info_for_exception = state_for(x);
}
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
! monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
! x->monitor_no(), info_for_exception, info);
}
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
assert(x->is_pinned(),"");
set_no_result(x);
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
+ // Need a scratch register for the inline type check
+ LIR_Opr scratch = LIR_OprFact::illegalOpr;
+ if (EnableValhalla && x->maybe_inlinetype()) {
+ scratch = new_register(T_INT);
+ }
CodeEmitInfo* info_for_exception = NULL;
if (x->needs_null_check()) {
info_for_exception = state_for(x);
}
+
+ CodeStub* throw_imse_stub =
+ x->maybe_inlinetype() ?
+ new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
+ NULL;
+
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
! monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
! x->monitor_no(), info_for_exception, info, throw_imse_stub);
}
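Inline type instances have no identity, so monitorenter on one must throw IllegalMonitorStateException; when the receiver may statically be an inline type (x->maybe_inlinetype()), the extra scratch register and throw_imse_stub give the back end a way to detect that case. A hedged sketch of the kind of check this enables inside the platform lock code (the mark-word pattern name and exact assembler calls are assumptions, not taken from this patch):

    // ldr(scratch, Address(obj, oopDesc::mark_offset_in_bytes()));
    // tst(scratch, markWord::inline_type_pattern);   // assumed Valhalla mark encoding
    // br(Assembler::NE, *throw_imse_stub->entry());  // inline type -> throw IMSE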
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
assert(x->is_pinned(),"");
}
#endif
CodeEmitInfo* info = state_for(x, x->state());
LIR_Opr reg = result_register_for(x->type());
new_instance(reg, x->klass(), x->is_unresolved(),
! FrameMap::r10_oop_opr,
! FrameMap::r11_oop_opr,
! FrameMap::r4_oop_opr,
! LIR_OprFact::illegalOpr,
! FrameMap::r3_metadata_opr, info);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
CodeEmitInfo* info = state_for(x, x->state());
}
#endif
CodeEmitInfo* info = state_for(x, x->state());
LIR_Opr reg = result_register_for(x->type());
new_instance(reg, x->klass(), x->is_unresolved(),
! /* allow_inline */ false,
! FrameMap::r10_oop_opr,
! FrameMap::r11_oop_opr,
! FrameMap::r4_oop_opr,
! LIR_OprFact::illegalOpr,
+ FrameMap::r3_metadata_opr, info);
+ LIR_Opr result = rlock_result(x);
+ __ move(reg, result);
+ }
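Here allow_inline is false: on this branch of Valhalla a plain `new` bytecode is not expected to instantiate an inline klass (inferred from the flag; the patch does not spell this out), while do_NewInlineTypeInstance below is the one caller that passes true. The two allocation paths differ only in that flag:

    // new_instance(reg, klass, is_unresolved, /* allow_inline */ false, ...);  // bytecode `new`
    // new_instance(reg, klass, false,         /* allow_inline */ true,  ...);  // inline type instance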
+
+ void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
+ // Same code as do_NewInstance, but use state_before so the allocation is re-executed on deoptimization.
+ CodeEmitInfo* info = state_for(x, x->state_before());
+ x->set_to_object_type();
+ LIR_Opr reg = result_register_for(x->type());
+ new_instance(reg, x->klass(), false,
+ /* allow_inline */ true,
+ FrameMap::r10_oop_opr,
+ FrameMap::r11_oop_opr,
+ FrameMap::r4_oop_opr,
+ LIR_OprFact::illegalOpr,
+ FrameMap::r3_metadata_opr, info);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
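Using x->state_before() (rather than x->state()) matters for deoptimization: if the allocation deopts, the interpreter re-executes the creating instruction instead of resuming after it, so no partially created inline instance becomes visible. The contrast, in the shapes this file already uses:

    // CodeEmitInfo* info = state_for(x, x->state());         // resume after the instruction on deopt
    // CodeEmitInfo* info = state_for(x, x->state_before());  // re-execute the instruction on deopt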
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
CodeEmitInfo* info = state_for(x, x->state());
LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
length.load_item_force(FrameMap::r19_opr);
LIR_Opr len = length.result();
! CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
! ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
klass2reg_with_patching(klass_reg, obj, patching_info);
! __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
length.load_item_force(FrameMap::r19_opr);
LIR_Opr len = length.result();
! ciKlass* obj = (ciKlass*) x->exact_type();
! CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
+
klass2reg_with_patching(klass_reg, obj, patching_info);
! if (x->is_null_free()) {
+ __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_PRIMITIVE_OBJECT, klass_reg, slow_path);
+ } else {
+ __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
+ }
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
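Null-free arrays get the distinct T_PRIMITIVE_OBJECT element type so allocation can be specialized for them; the usual Valhalla rationale (an assumption here, not stated in this patch) is that their elements must come up as the element class's default value rather than null, and the slow-path stub is likewise told x->is_null_free(). The dispatch reduces to:

    // BasicType elem = x->is_null_free() ? T_PRIMITIVE_OBJECT : T_OBJECT;
    // __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem, klass_reg, slow_path);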
// info for exceptions
CodeEmitInfo* info_for_exception =
(x->needs_exception_state() ? state_for(x) :
state_for(x, x->state_before(), true /*ignore_xhandler*/));
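+ // Null can never be cast to a null-free type, so reject it with an explicit
+ // NullPointerException before the regular subtype check below.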
+ if (x->is_null_free()) {
+ __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
+ }
CodeStub* stub;
if (x->is_incompatible_class_change_check()) {
assert(patching_info == NULL, "can't patch this");
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
! x->profiled_method(), x->profiled_bci());
}
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
LIRItem obj(x->obj(), this);
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
+
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
! x->profiled_method(), x->profiled_bci(), x->is_null_free());
}
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
LIRItem obj(x->obj(), this);
increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
}
! __ cmp(lir_cond(cond), left, right);
// Generate branch profiling. Profiling code doesn't kill flags.
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
__ branch(lir_cond(cond), x->tsux(), x->usux());
increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
}
! if (x->substitutability_check()) {
+ substitutability_check(x, *xin, *yin);
+ } else {
+ __ cmp(lir_cond(cond), left, right);
+ }
+
// Generate branch profiling. Profiling code doesn't kill flags.
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
__ branch(lir_cond(cond), x->tsux(), x->usux());
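For acmp on operands that may be inline types, plain reference comparison is replaced by a substitutability test: two inline objects are == when they have the same class and all their fields are (recursively) substitutable. The field walk itself lives in shared code rather than this platform file (inferred from the init_temps_for_substitutability_check hook above); annotated, the new dispatch is:

    // if (x->substitutability_check()) {
    //   substitutability_check(x, *xin, *yin);   // may end up calling into the runtime
    // } else {
    //   __ cmp(lir_cond(cond), left, right);     // plain pointer comparison
    // }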