src/hotspot/share/c1/c1_LIRGenerator.cpp
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
+ #include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
_result = opr;
}
void LIRItem::load_item() {
+ assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
+
if (result()->is_illegal()) {
// update the items result
_result = value()->operand();
}
if (!result()->is_register()) {
default: ShouldNotReachHere();
}
}
! void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
if (!GenerateSynchronizationCode) return;
// for slow path, use debug info for state after successful locking
! CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
__ load_stack_address_monitor(monitor_no, lock);
// for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
! __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}
void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
if (!GenerateSynchronizationCode) return;
default: ShouldNotReachHere();
}
}
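+ // Valhalla: throw_ie_stub, when provided, is taken if the object being locked turns out to be an
+ // inline (value) object, which cannot be synchronized on.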
! void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
+ CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_ie_stub) {
if (!GenerateSynchronizationCode) return;
// for slow path, use debug info for state after successful locking
! CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_ie_stub, scratch);
__ load_stack_address_monitor(monitor_no, lock);
// for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
! __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_ie_stub);
}
void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
if (!GenerateSynchronizationCode) return;
tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
}
}
#endif
! void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
! klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
! // If klass is not loaded we do not know if the klass has finalizers:
! if (UseFastNewInstance && klass->is_loaded()
&& !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
}
}
#endif
! void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
! if (allow_inline) {
! assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
! __ metadata2reg(klass->constant_encoding(), klass_reg);
+ } else {
+ klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
+ }
+ // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
+ if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
&& !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
const int instance_size = align_object_size(klass->size_helper());
__ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
} else {
CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
! __ branch(lir_cond_always, slow_path);
__ branch_destination(slow_path->continuation());
}
}
const int instance_size = align_object_size(klass->size_helper());
__ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
} else {
CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
! __ jump(slow_path);
__ branch_destination(slow_path->continuation());
}
}
// if a probable array type has been identified, figure out if any
// of the required checks for a fast case can be elided.
int flags = LIR_OpArrayCopy::all_flags;
+ if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
+ flags &= ~LIR_OpArrayCopy::always_slow_path;
+ }
+ if (!src->maybe_flat_array()) {
+ flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
+ }
+ if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
+ flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
+ }
+
if (!src_objarray)
flags &= ~LIR_OpArrayCopy::src_objarray;
if (!dst_objarray)
flags &= ~LIR_OpArrayCopy::dst_objarray;
// much less risk of confusion for C1 register allocator. The choice of the universe
// object here is correct as long as it returns the same modifiers we would expect
// from the primitive class itself. See spec for Class.getModifiers that provides
// the typed array klasses with similar modifiers as their component types.
Klass* univ_klass = Universe::byteArrayKlass();
! assert(univ_klass->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass);
LIR_Opr recv_klass = new_register(T_METADATA);
__ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);
// Check if this is a Java mirror of primitive type, and select the appropriate klass.
LIR_Opr klass = new_register(T_METADATA);
__ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(nullptr));
__ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);
- // Get the answer.
- __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
}
void LIRGenerator::do_getObjectSize(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
LIR_Opr result_reg = rlock_result(x);
// much less risk of confusion for C1 register allocator. The choice of the universe
// object here is correct as long as it returns the same modifiers we would expect
// from the primitive class itself. See spec for Class.getModifiers that provides
// the typed array klasses with similar modifiers as their component types.
+ // Valhalla update: the code is now a bit convoluted because arrays and primitive
+ // classes no longer have the same modifier set, but we cannot introduce
+ // branches in LIR generation (JDK-8211231). So the first part of the code remains
+ // identical, using the byteArrayKlass object to avoid an NPE when accessing the
+ // modifiers. The code then also prepares the correct modifier set for primitive
+ // classes, and a second conditional move puts the right value into result.
+
Klass* univ_klass = Universe::byteArrayKlass();
! assert(univ_klass->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC
+ | (Arguments::enable_preview() ? JVM_ACC_IDENTITY : 0)), "Sanity");
LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass);
LIR_Opr recv_klass = new_register(T_METADATA);
__ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);
// Check if this is a Java mirror of primitive type, and select the appropriate klass.
LIR_Opr klass = new_register(T_METADATA);
__ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(nullptr));
__ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);
+ LIR_Opr klass_modifiers = new_register(T_INT);
+ __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), klass_modifiers);
+
+ LIR_Opr prim_modifiers = load_immediate(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC, T_INT);
+
+ __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
+ __ cmove(lir_cond_equal, prim_modifiers, klass_modifiers, result, T_INT);
}
void LIRGenerator::do_getObjectSize(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
LIR_Opr result_reg = rlock_result(x);
}
}
LIR_Opr result = new_register(t);
__ move((LIR_Opr)c, result);
! _constants.append(c);
! _reg_for_constants.append(result);
return result;
}
//------------------------field access--------------------------------------
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
}
}
LIR_Opr result = new_register(t);
__ move((LIR_Opr)c, result);
! if (!in_conditional_code()) {
! _constants.append(c);
+ _reg_for_constants.append(result);
+ }
return result;
}
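+
+ // LIR emitted between set_in_conditional_code(true) and set_in_conditional_code(false) only runs
+ // on one path of a branch, so constants materialized there are not cached in _reg_for_constants
+ // (see load_constant above) and LIRItems must not be loaded there (see LIRItem::load_item).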
+ void LIRGenerator::set_in_conditional_code(bool v) {
+ assert(v != _in_conditional_code, "must change state");
+ _in_conditional_code = v;
+ }
+
+
//------------------------field access--------------------------------------
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
}
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != nullptr;
bool obj_store = is_reference_type(x->elt_type());
! bool needs_store_check = obj_store && (x->value()->as_Constant() == nullptr ||
! !get_jobject_constant(x->value())->is_null_object() ||
! x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
}
+ // FIXME -- I can't find any other way to pass an address to access_load_at().
+ class TempResolvedAddress: public Instruction {
+ public:
+ TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
+ set_operand(addr);
+ }
+ virtual void input_values_do(ValueVisitor*) {}
+ virtual void visit(InstructionVisitor* v) {}
+ virtual const char* name() const { return "TempResolvedAddress"; }
+ };
+
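+ // Computes the address of a flat array element: array base + array header size +
+ // (index << log2(element size)), materialized with leal so it can be passed around as a
+ // single pointer operand.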
+ LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
+ ciType* array_type = array.value()->declared_type();
+ ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
+ assert(flat_array_klass->is_loaded(), "must be");
+
+ int array_header_size = flat_array_klass->array_header_in_bytes();
+ int shift = flat_array_klass->log2_element_size();
+
+ #ifndef _LP64
+ LIR_Opr index_op = new_register(T_INT);
+ // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
+ // the top (shift+1) bits of index_op must be zero, or
+ // else throw ArrayIndexOutOfBoundsException
+ if (index.result()->is_constant()) {
+ jint const_index = index.result()->as_jint();
+ __ move(LIR_OprFact::intConst(const_index << shift), index_op);
+ } else {
+ __ shift_left(index_op, shift, index.result());
+ }
+ #else
+ LIR_Opr index_op = new_register(T_LONG);
+ if (index.result()->is_constant()) {
+ jint const_index = index.result()->as_jint();
+ __ move(LIR_OprFact::longConst(const_index << shift), index_op);
+ } else {
+ __ convert(Bytecodes::_i2l, index.result(), index_op);
+ // Need to shift manually, as LIR_Address can scale only up to 3.
+ __ shift_left(index_op, shift, index_op);
+ }
+ #endif
+
+ LIR_Opr elm_op = new_pointer_register();
+ LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
+ __ leal(LIR_OprFact::address(elm_address), elm_op);
+ return elm_op;
+ }
+
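+ // Loads a single field ('field', at 'sub_offset' within the element) directly from a flat array
+ // element. If the field is null-free and the loaded reference is null, the field's default
+ // instance is substituted.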
+ void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, int sub_offset) {
+ assert(field != nullptr, "Need a subelement type specified");
+
+ // Find the starting address of the source (inside the array)
+ LIR_Opr elm_op = get_and_load_element_address(array, index);
+
+ BasicType subelt_type = field->type()->basic_type();
+ TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
+ LIRItem elm_item(elm_resolved_addr, this);
+
+ DecoratorSet decorators = IN_HEAP;
+ access_load_at(decorators, subelt_type,
+ elm_item, LIR_OprFact::intConst(sub_offset), result,
+ nullptr, nullptr);
+
+ if (field->is_null_free()) {
+ assert(field->type()->is_loaded(), "Must be");
+ assert(field->type()->is_inlinetype(), "Must be if loaded");
+ assert(field->type()->as_inline_klass()->is_initialized(), "Must be");
+ LabelObj* L_end = new LabelObj();
+ __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
+ __ branch(lir_cond_notEqual, L_end->label());
+ set_in_conditional_code(true);
+ Constant* default_value = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
+ if (default_value->is_pinned()) {
+ __ move(LIR_OprFact::value_type(default_value->type()), result);
+ } else {
+ __ move(load_constant(default_value), result);
+ }
+ __ branch_destination(L_end->label());
+ set_in_conditional_code(false);
+ }
+ }
+
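+ // Copies a flat array element field by field: on a load, from the array element into the buffered
+ // object 'obj_item'; on a store, from 'obj_item' into the array element.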
+ void LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
+ ciField* field, int sub_offset) {
+ assert(sub_offset == 0 || field != nullptr, "Sanity check");
+
+ // Find the starting address of the source (inside the array)
+ LIR_Opr elm_op = get_and_load_element_address(array, index);
+
+ ciInlineKlass* elem_klass = nullptr;
+ if (field != nullptr) {
+ elem_klass = field->type()->as_inline_klass();
+ } else {
+ elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
+ }
+ for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
+ ciField* inner_field = elem_klass->nonstatic_field_at(i);
+ assert(!inner_field->is_flat(), "flat fields must have been expanded");
+ int obj_offset = inner_field->offset_in_bytes();
+ int elm_offset = obj_offset - elem_klass->first_field_offset() + sub_offset; // object header is not stored in array.
+ BasicType field_type = inner_field->type()->basic_type();
+
+ // Types which are smaller than int are still passed in an int register.
+ BasicType reg_type = field_type;
+ switch (reg_type) {
+ case T_BYTE:
+ case T_BOOLEAN:
+ case T_SHORT:
+ case T_CHAR:
+ reg_type = T_INT;
+ break;
+ default:
+ break;
+ }
+
+ LIR_Opr temp = new_register(reg_type);
+ TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
+ LIRItem elm_item(elm_resolved_addr, this);
+
+ DecoratorSet decorators = IN_HEAP;
+ if (is_load) {
+ access_load_at(decorators, field_type,
+ elm_item, LIR_OprFact::intConst(elm_offset), temp,
+ nullptr, nullptr);
+ access_store_at(decorators, field_type,
+ obj_item, LIR_OprFact::intConst(obj_offset), temp,
+ nullptr, nullptr);
+ } else {
+ access_load_at(decorators, field_type,
+ obj_item, LIR_OprFact::intConst(obj_offset), temp,
+ nullptr, nullptr);
+ access_store_at(decorators, field_type,
+ elm_item, LIR_OprFact::intConst(elm_offset), temp,
+ nullptr, nullptr);
+ }
+ }
+ }
+
+ void LIRGenerator::check_flat_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
+ LIR_Opr tmp = new_register(T_METADATA);
+ __ check_flat_array(array, value, tmp, slow_path);
+ }
+
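+ // For a store into a possibly null-free array, emit a null check on 'value' that only applies
+ // when the array turns out to be null-free.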
+ void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
+ LabelObj* L_end = new LabelObj();
+ LIR_Opr tmp = new_register(T_METADATA);
+ __ check_null_free_array(array.result(), tmp);
+ __ branch(lir_cond_equal, L_end->label());
+ __ null_check(value.result(), info);
+ __ branch_destination(L_end->label());
+ }
+
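+ // Returns true when the store might target a flat array with a value that would then have to be
+ // flattened, i.e. a runtime flat-array check (with a possible runtime-call slow path) is needed.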
+ bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
+ if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
+ ciType* type = x->value()->declared_type();
+ if (type != nullptr && type->is_klass()) {
+ ciKlass* klass = type->as_klass();
+ if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->flat_in_array())) {
+ // This is known to be a non-flat object. If the array is a flat array,
+ // it will be caught by the code generated by array_store_check().
+ return false;
+ }
+ }
+ // We're not 100% sure, so let's do the flat_array_store_check.
+ return true;
+ }
+ return false;
+ }
+
+ bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
+ return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
+ }
+
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
+ assert(x->elt_type() != T_ARRAY, "never used");
+ bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != nullptr;
bool obj_store = is_reference_type(x->elt_type());
! bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
! (x->value()->as_Constant() == nullptr ||
! !get_jobject_constant(x->value())->is_null_object());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
-
}
! if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
!
+ if (needs_store_check || x->check_boolean()
+ || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
// range_check also does the null check
null_check_info = nullptr;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
}
! DecoratorSet decorators = IN_HEAP | IS_ARRAY;
! if (x->check_boolean()) {
! decorators |= C1_MASK_BOOLEAN;
! }
! access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
! nullptr, null_check_info);
}
void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr result,
CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
// range_check also does the null check
null_check_info = nullptr;
}
}
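+ // Profile the array type of this store (and, when the array may be null-free, whether it
+ // actually was null-free). Stores to arrays already known to be flat are not profiled.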
+ if (x->should_profile()) {
+ if (x->array()->is_loaded_flat_array()) {
+ // No need to profile a store to a flat array of known type. This can happen if
+ // the type only became known after optimizations (for example, after the PhiSimplifier).
+ x->set_should_profile(false);
+ } else {
+ int bci = x->profiled_bci();
+ ciMethodData* md = x->profiled_method()->method_data();
+ assert(md != nullptr, "Sanity");
+ ciProfileData* data = md->bci_to_data(bci);
+ assert(data != nullptr && data->is_ArrayStoreData(), "incorrect profiling entry");
+ ciArrayStoreData* store_data = (ciArrayStoreData*)data;
+ profile_array_type(x, md, store_data);
+ assert(store_data->is_ArrayStoreData(), "incorrect profiling entry");
+ if (x->array()->maybe_null_free_array()) {
+ profile_null_free_array(array, md, store_data);
+ }
+ }
+ }
+
if (GenerateArrayStoreCheck && needs_store_check) {
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
}
! if (is_loaded_flat_array) {
! if (!x->value()->is_null_free()) {
! __ null_check(value.result(), new CodeEmitInfo(range_check_info));
! }
+ // If array element is an empty inline type, no need to copy anything
+ if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
+ access_flat_array(false, array, index, value);
+ }
+ } else {
+ StoreFlattenedArrayStub* slow_path = nullptr;
+
+ if (needs_flat_array_store_check(x)) {
+ // Check if we indeed have a flat array
+ index.load_item();
+ slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
+ check_flat_array(array.result(), value.result(), slow_path);
+ set_in_conditional_code(true);
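+ // Everything from here to the stub continuation only runs when the array is not flat;
+ // flat arrays are handled entirely by the StoreFlattenedArrayStub runtime call.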
+ } else if (needs_null_free_array_store_check(x)) {
+ CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
+ check_null_free_array(array, value, info);
+ }
+
+ DecoratorSet decorators = IN_HEAP | IS_ARRAY;
+ if (x->check_boolean()) {
+ decorators |= C1_MASK_BOOLEAN;
+ }
! access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
! nullptr, null_check_info);
+ if (slow_path != nullptr) {
+ __ branch_destination(slow_path->continuation());
+ set_in_conditional_code(false);
+ }
+ }
}
void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr result,
CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
LIR_Opr result = rlock_result(x, field_type);
access_load_at(decorators, field_type,
object, LIR_OprFact::intConst(x->offset()), result,
info ? new CodeEmitInfo(info) : nullptr, info);
+
+ ciField* field = x->field();
+ if (field->is_null_free()) {
+ // Load from non-flat inline type field requires
+ // a null check to replace null with the default value.
+ ciInstanceKlass* holder = field->holder();
+ if (field->is_static() && holder->is_loaded()) {
+ ciObject* val = holder->java_mirror()->field_value(field).as_object();
+ if (!val->is_null_object()) {
+ // Static field is initialized, we don't need to perform a null check.
+ return;
+ }
+ }
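+ // Otherwise a null result stands for the field's default instance: substitute it inline when
+ // the inline klass is already initialized, or deoptimize so the klass gets initialized first.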
+ ciInlineKlass* inline_klass = field->type()->as_inline_klass();
+ if (inline_klass->is_initialized()) {
+ LabelObj* L_end = new LabelObj();
+ __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
+ __ branch(lir_cond_notEqual, L_end->label());
+ set_in_conditional_code(true);
+ Constant* default_value = new Constant(new InstanceConstant(inline_klass->default_instance()));
+ if (default_value->is_pinned()) {
+ __ move(LIR_OprFact::value_type(default_value->type()), result);
+ } else {
+ __ move(load_constant(default_value), result);
+ }
+ __ branch_destination(L_end->label());
+ set_in_conditional_code(false);
+ } else {
+ info = state_for(x, x->state_before());
+ __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(nullptr));
+ __ branch(lir_cond_equal, new DeoptimizeStub(info, Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_make_not_entrant));
+ }
+ }
}
// int/long jdk.internal.util.Preconditions.checkIndex
void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
assert(x->number_of_arguments() == 3, "wrong type");
// The range check performs the null check, so clear it out for the load
null_check_info = nullptr;
}
}
! DecoratorSet decorators = IN_HEAP | IS_ARRAY;
! LIR_Opr result = rlock_result(x, x->elt_type());
! access_load_at(decorators, x->elt_type(),
! array, index.result(), result,
! nullptr, null_check_info);
}
void LIRGenerator::do_NullCheck(NullCheck* x) {
if (x->can_trap()) {
// The range check performs the null check, so clear it out for the load
null_check_info = nullptr;
}
}
! ciMethodData* md = nullptr;
+ ciArrayLoadData* load_data = nullptr;
+ if (x->should_profile()) {
+ if (x->array()->is_loaded_flat_array()) {
+ // No need to profile a load from a flat array of known type. This can happen if
+ // the type only became known after optimizations (for example, after the PhiSimplifier).
+ x->set_should_profile(false);
+ } else {
+ int bci = x->profiled_bci();
+ md = x->profiled_method()->method_data();
+ assert(md != nullptr, "Sanity");
+ ciProfileData* data = md->bci_to_data(bci);
+ assert(data != nullptr && data->is_ArrayLoadData(), "incorrect profiling entry");
+ load_data = (ciArrayLoadData*)data;
+ profile_array_type(x, md, load_data);
+ }
+ }
+
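+ // 'element' is the value whose type is profiled at the end of this method. It is only assigned
+ // on the generic path below, which is the only path where profiling can still be enabled
+ // (loads from arrays known to be flat have profiling disabled above).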
+ Value element = nullptr;
+ if (x->vt() != nullptr) {
+ assert(x->array()->is_loaded_flat_array(), "must be");
+ // Find the destination address (of the NewInlineTypeInstance).
+ LIRItem obj_item(x->vt(), this);
+
+ access_flat_array(true, array, index, obj_item,
+ x->delayed() == nullptr ? nullptr : x->delayed()->field(),
+ x->delayed() == nullptr ? 0 : x->delayed()->offset());
+ set_no_result(x);
+ } else if (x->delayed() != nullptr) {
+ assert(x->array()->is_loaded_flat_array(), "must be");
+ LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
+ access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
+ } else if (x->array() != nullptr && x->array()->is_loaded_flat_array() &&
+ x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_initialized() &&
+ x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
+ // Load the default instance instead of reading the element
+ ciInlineKlass* elem_klass = x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
+ LIR_Opr result = rlock_result(x, x->elt_type());
+ assert(elem_klass->is_initialized(), "Must be");
+ Constant* default_value = new Constant(new InstanceConstant(elem_klass->default_instance()));
+ if (default_value->is_pinned()) {
+ __ move(LIR_OprFact::value_type(default_value->type()), result);
+ } else {
+ __ move(load_constant(default_value), result);
+ }
+ } else {
+ LIR_Opr result = rlock_result(x, x->elt_type());
+ LoadFlattenedArrayStub* slow_path = nullptr;
+
+ if (x->should_profile() && x->array()->maybe_null_free_array()) {
+ profile_null_free_array(array, md, load_data);
+ }
+
+ if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
+ assert(x->delayed() == nullptr, "Delayed LoadIndexed only applies to loaded_flat_arrays");
+ index.load_item();
+ // if we are loading from a flat array, load it using a runtime call
+ slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
+ check_flat_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
+ set_in_conditional_code(true);
+ }
+
+ DecoratorSet decorators = IN_HEAP | IS_ARRAY;
+ access_load_at(decorators, x->elt_type(),
+ array, index.result(), result,
+ nullptr, null_check_info);
! if (slow_path != nullptr) {
! __ branch_destination(slow_path->continuation());
! set_in_conditional_code(false);
! }
+
+ element = x;
+ }
+
+ if (x->should_profile()) {
+ profile_element_type(element, md, load_data);
+ }
}
void LIRGenerator::do_NullCheck(NullCheck* x) {
if (x->can_trap()) {
if (!do_null && !do_update) {
return result;
}
ciKlass* exact_signature_k = nullptr;
! if (do_update) {
// Is the type from the signature exact (the only one possible)?
exact_signature_k = signature_at_call_k->exact_klass();
if (exact_signature_k == nullptr) {
exact_signature_k = comp->cha_exact_type(signature_at_call_k);
} else {
if (!do_null && !do_update) {
return result;
}
ciKlass* exact_signature_k = nullptr;
! if (do_update && signature_at_call_k != nullptr) {
// Is the type from the signature exact (the only one possible)?
exact_signature_k = signature_at_call_k->exact_klass();
if (exact_signature_k == nullptr) {
exact_signature_k = comp->cha_exact_type(signature_at_call_k);
} else {
}
}
}
}
+ void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
+ assert(md != nullptr && data != nullptr, "should have been initialized");
+ LIR_Opr mdp = new_register(T_METADATA);
+ __ metadata2reg(md->constant_encoding(), mdp);
+ LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
+ LIR_Opr flags = new_register(T_INT);
+ __ move(addr, flags);
+ if (condition != lir_cond_always) {
+ LIR_Opr update = new_register(T_INT);
+ __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
+ } else {
+ __ logical_or(flags, LIR_OprFact::intConst(flag), flags);
+ }
+ __ store(flags, addr);
+ }
+
+ template <class ArrayData> void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ArrayData* load_store) {
+ assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
+ LabelObj* L_end = new LabelObj();
+ LIR_Opr tmp = new_register(T_METADATA);
+ __ check_null_free_array(array.result(), tmp);
+
+ profile_flags(md, load_store, ArrayStoreData::null_free_array_byte_constant(), lir_cond_equal);
+ }
+
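+ // Records the observed array type in the ArrayLoadData/ArrayStoreData profile entry for this access.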
+ template <class ArrayData> void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ArrayData*& load_store) {
+ assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
+ LIR_Opr mdp = LIR_OprFact::illegalOpr;
+ profile_type(md, md->byte_offset_of_slot(load_store, ArrayData::array_offset()), 0,
+ load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
+ }
+
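+ // Records the type of the loaded element in the ArrayLoadData profile entry.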
+ void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadData* load_data) {
+ assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
+ assert(md != nullptr && load_data != nullptr, "should have been initialized");
+ LIR_Opr mdp = LIR_OprFact::illegalOpr;
+ profile_type(md, md->byte_offset_of_slot(load_data, ArrayLoadData::element_offset()), 0,
+ load_data->element()->type(), element, mdp, false, nullptr, nullptr);
+ }
+
void LIRGenerator::do_Base(Base* x) {
__ std_entry(LIR_OprFact::illegalOpr);
// Emit moves from physical registers / stack slots to virtual registers
CallingConvention* args = compilation()->frame_map()->incoming_arguments();
IRScope* irScope = compilation()->hir()->top_scope();
if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
profile_parameters(x);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
increment_invocation_counter(info);
}
+ if (method()->has_scalarized_args()) {
+ // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
+ // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
+ CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
+ CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
+ __ append(new LIR_Op0(lir_check_orig_pc));
+ __ branch(lir_cond_notEqual, deopt_stub);
+ }
// all blocks with a successor must end with an unconditional jump
// to the successor even if they are consecutive
__ jump(x->default_sux());
}
__ osr_entry(LIR_Assembler::osrBufferPointer());
LIR_Opr result = rlock_result(x);
__ move(LIR_Assembler::osrBufferPointer(), result);
}
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
assert(args->length() == arg_list->length(),
"args=%d, arg_list=%d", args->length(), arg_list->length());
for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
LIRItem* param = args->at(i);
LIR_Opr loc = arg_list->at(i);
! if (loc->is_register()) {
- param->load_item_force(loc);
- } else {
- LIR_Address* addr = loc->as_address_ptr();
- param->load_for_store(addr->type());
- if (addr->type() == T_OBJECT) {
- __ move_wide(param->result(), addr);
- } else
- __ move(param->result(), addr);
- }
}
if (x->has_receiver()) {
LIRItem* receiver = args->at(0);
LIR_Opr loc = arg_list->at(0);
__ osr_entry(LIR_Assembler::osrBufferPointer());
LIR_Opr result = rlock_result(x);
__ move(LIR_Assembler::osrBufferPointer(), result);
}
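+ // Loads a single outgoing argument into the location assigned by the calling convention: forced
+ // into a register, or stored to the stack slot (factored out of invoke_load_arguments).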
+ void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
+ if (loc->is_register()) {
+ param->load_item_force(loc);
+ } else {
+ LIR_Address* addr = loc->as_address_ptr();
+ param->load_for_store(addr->type());
+ if (addr->type() == T_OBJECT) {
+ __ move_wide(param->result(), addr);
+ } else {
+ __ move(param->result(), addr);
+ }
+ }
+ }
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
assert(args->length() == arg_list->length(),
"args=%d, arg_list=%d", args->length(), arg_list->length());
for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
LIRItem* param = args->at(i);
LIR_Opr loc = arg_list->at(i);
! invoke_load_one_argument(param, loc);
}
if (x->has_receiver()) {
LIRItem* receiver = args->at(0);
LIR_Opr loc = arg_list->at(0);
#endif
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
left.load_item();
! if (can_inline_as_constant(right.value())) {
right.dont_load_item();
} else {
right.load_item();
}
LIRItem t_val(x->tval(), this);
LIRItem f_val(x->fval(), this);
t_val.dont_load_item();
f_val.dont_load_item();
- LIR_Opr reg = rlock_result(x);
! __ cmp(lir_cond(x->cond()), left.result(), right.result());
! __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
}
void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
assert(x->number_of_arguments() == 0, "wrong type");
// Enforce computation of _reserved_argument_area_size which is required on some platforms.
#endif
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
left.load_item();
! if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
right.dont_load_item();
} else {
+ // substitutability_check() needs to use right as a base register.
right.load_item();
}
LIRItem t_val(x->tval(), this);
LIRItem f_val(x->fval(), this);
t_val.dont_load_item();
f_val.dont_load_item();
! if (x->substitutability_check()) {
! substitutability_check(x, left, right, t_val, f_val);
+ } else {
+ LIR_Opr reg = rlock_result(x);
+ __ cmp(lir_cond(x->cond()), left.result(), right.result());
+ __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
+ }
+ }
+
+ void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
+ assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
+ bool is_acmpeq = (x->cond() == If::eql);
+ LIR_Opr equal_result = is_acmpeq ? t_val.result() : f_val.result();
+ LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
+ LIR_Opr result = rlock_result(x);
+ CodeEmitInfo* info = state_for(x, x->state_before());
+
+ substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
+ }
+
+ void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
+ LIR_Opr equal_result = LIR_OprFact::intConst(1);
+ LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
+ LIR_Opr result = new_register(T_INT);
+ CodeEmitInfo* info = state_for(x, x->state_before());
+
+ substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
+
+ assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
+ __ cmp(lir_cond(x->cond()), result, equal_result);
+ }
+
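+ // Common code for acmp substitutability checks: compares the operands inline where possible and
+ // falls back to a SubstitutabilityCheckStub runtime call when inline objects must be compared
+ // field by field.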
+ void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
+ LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
+ CodeEmitInfo* info) {
+ LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
+ LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
+ LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
+ LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;
+
+ ciKlass* left_klass = left_val ->as_loaded_klass_or_null();
+ ciKlass* right_klass = right_val->as_loaded_klass_or_null();
+
+ if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
+ !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
+ init_temps_for_substitutability_check(tmp1, tmp2);
+ }
+
+ if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
+ // No need to load klass -- the operands are statically known to be the same inline klass.
+ } else {
+ BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
+ left_klass_op = new_register(t_klass);
+ right_klass_op = new_register(t_klass);
+ }
+
+ CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
+ __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
+ tmp1, tmp2,
+ left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
}
void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
assert(x->number_of_arguments() == 0, "wrong type");
// Enforce computation of _reserved_argument_area_size which is required on some platforms.
ciMethodData* md = x->method()->method_data_or_null();
assert(md != nullptr, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
if (data != nullptr) {
assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
! ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
LIR_Opr mdp = LIR_OprFact::illegalOpr;
bool ignored_will_link;
ciSignature* signature_at_call = nullptr;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
ciMethodData* md = x->method()->method_data_or_null();
assert(md != nullptr, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
if (data != nullptr) {
assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
! ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
LIR_Opr mdp = LIR_OprFact::illegalOpr;
bool ignored_will_link;
ciSignature* signature_at_call = nullptr;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
md->set_return_type(bci, exact);
}
}
}
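+ // Tries to decide statically whether 'value' is an inline object and, if so, records it in the
+ // profile flags. Returns false when the answer can only be determined at runtime, in which case
+ // the caller emits a dynamic profile_inline_type check.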
+ bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
+ ciKlass* klass = value->as_loaded_klass_or_null();
+ if (klass != nullptr) {
+ if (klass->is_inlinetype()) {
+ profile_flags(md, data, flag, lir_cond_always);
+ } else if (klass->can_be_inline_klass()) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ return true;
+ }
+
+
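+ // Profiles both operands of an acmp: their observed types and, when it cannot be decided
+ // statically, whether each operand was an inline object at runtime.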
+ void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
+ ciMethod* method = x->method();
+ assert(method != nullptr, "method should be set if branch is profiled");
+ ciMethodData* md = method->method_data_or_null();
+ assert(md != nullptr, "Sanity");
+ ciProfileData* data = md->bci_to_data(x->bci());
+ assert(data != nullptr, "must have profiling data");
+ assert(data->is_ACmpData(), "need ACmpData for acmp profiling");
+ ciACmpData* acmp = (ciACmpData*)data;
+ LIR_Opr mdp = LIR_OprFact::illegalOpr;
+ profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
+ acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
+ int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
+ if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
+ LIR_Opr mdp = new_register(T_METADATA);
+ __ metadata2reg(md->constant_encoding(), mdp);
+ LIRItem value(x->left(), this);
+ value.load_item();
+ __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
+ }
+ profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
+ in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
+ acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
+ if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
+ LIR_Opr mdp = new_register(T_METADATA);
+ __ metadata2reg(md->constant_encoding(), mdp);
+ LIRItem value(x->right(), this);
+ value.load_item();
+ __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
+ }
+ }
+
void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
// We can safely ignore accessors here, since c2 will inline them anyway,
// accessors are also always mature.
if (!x->inlinee()->is_accessor()) {
CodeEmitInfo* info = state_for(x, x->state(), true);