src/hotspot/share/c1/c1_LIRGenerator.cpp

*** 32,41 **** --- 32,43 ----
  #include "c1/c1_ValueStack.hpp"
  #include "ci/ciArrayKlass.hpp"
  #include "ci/ciInstance.hpp"
  #include "ci/ciObjArray.hpp"
  #include "ci/ciUtilities.hpp"
+ #include "ci/ciValueArrayKlass.hpp"
+ #include "ci/ciValueKlass.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/c1/barrierSetC1.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/stubRoutines.hpp"
*** 207,216 **** --- 209,220 ----
    _result = opr;
  }
  
  void LIRItem::load_item() {
+   assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
+ 
    if (result()->is_illegal()) {
      // update the items result
      _result = value()->operand();
    }
    if (!result()->is_register()) {
*** 634,650 ****
      default: ShouldNotReachHere();
    }
  }
  
! void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
    if (!GenerateSynchronizationCode) return;
    // for slow path, use debug info for state after successful locking
!   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
    __ load_stack_address_monitor(monitor_no, lock);
    // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
!   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
  }
  
  void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
    if (!GenerateSynchronizationCode) return;
--- 638,655 ----
      default: ShouldNotReachHere();
    }
  }
  
! void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
!                                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
    if (!GenerateSynchronizationCode) return;
    // for slow path, use debug info for state after successful locking
!   CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
    __ load_stack_address_monitor(monitor_no, lock);
    // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
!   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
  }
  
  void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
    if (!GenerateSynchronizationCode) return;
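Note: the extra throw_imse_stub parameter threads through the inline-types rule that value objects have no identity and therefore cannot be synchronized on; lock_object() can now branch to a stub that raises IllegalMonitorStateException. A minimal standalone sketch of the semantics being wired in (illustrative types only, not HotSpot code):

    #include <stdexcept>

    struct ObjHeader { bool is_value_type; };

    void monitor_enter(ObjHeader* obj) {
      if (obj->is_value_type) {
        // Corresponds to lock_object() branching to throw_imse_stub.
        throw std::logic_error("IllegalMonitorStateException");
      }
      // ... normal lock acquisition (MonitorEnterStub slow path etc.) elided ...
    }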
*** 782,791 **** --- 787,806 ----
    // if a probable array type has been identified, figure out if any
    // of the required checks for a fast case can be elided.
    int flags = LIR_OpArrayCopy::all_flags;
  
+   if (!src->is_loaded_flattened_array() && !dst->is_loaded_flattened_array()) {
+     flags &= ~LIR_OpArrayCopy::always_slow_path;
+   }
+   if (!src->maybe_flattened_array()) {
+     flags &= ~LIR_OpArrayCopy::src_valuetype_check;
+   }
+   if (!dst->maybe_flattened_array() && !dst->maybe_null_free_array()) {
+     flags &= ~LIR_OpArrayCopy::dst_valuetype_check;
+   }
+ 
  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;
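Note: this follows the existing conservative-mask pattern of arraycopy flags -- start from all_flags and clear every runtime check the static types already rule out. A standalone sketch of the idea, simplified (the real code above distinguishes "known flattened" from "maybe flattened"; flag names here just mirror the ones added):

    #include <cassert>

    enum CopyFlags {
      always_slow_path    = 1 << 0,
      src_valuetype_check = 1 << 1,
      dst_valuetype_check = 1 << 2,
      all_flags           = (1 << 3) - 1
    };

    int elide_checks(bool src_maybe_flat, bool dst_maybe_flat, bool dst_maybe_null_free) {
      int flags = all_flags;                      // assume every runtime check is needed
      if (!src_maybe_flat && !dst_maybe_flat)
        flags &= ~always_slow_path;               // fast-path copy is possible
      if (!src_maybe_flat)
        flags &= ~src_valuetype_check;            // source proven non-flattened
      if (!dst_maybe_flat && !dst_maybe_null_free)
        flags &= ~dst_valuetype_check;            // destination accepts plain oops
      return flags;
    }

    int main() {
      assert(elide_checks(false, false, false) == 0);  // no value-type checks remain
      return 0;
    }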
*** 1419,1433 ****
      }
    }
    LIR_Opr result = new_register(t);
    __ move((LIR_Opr)c, result);
!   _constants.append(c);
!   _reg_for_constants.append(result);
    return result;
  }
  
  //------------------------field access--------------------------------------
  
  void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
    assert(x->number_of_arguments() == 4, "wrong type");
    LIRItem obj (x->argument_at(0), this);  // object
--- 1434,1456 ----
      }
    }
    LIR_Opr result = new_register(t);
    __ move((LIR_Opr)c, result);
!   if (!in_conditional_code()) {
!     _constants.append(c);
!     _reg_for_constants.append(result);
!   }
    return result;
  }
  
+ void LIRGenerator::set_in_conditional_code(bool v) {
+   assert(v != _in_conditional_code, "must change state");
+   _in_conditional_code = v;
+ }
+ 
  //------------------------field access--------------------------------------
  
  void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
    assert(x->number_of_arguments() == 4, "wrong type");
    LIRItem obj (x->argument_at(0), this);  // object
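Note: load_constant() memoizes each materialized constant in _constants/_reg_for_constants so later uses reuse the same register. On a conditionally executed LIR path that register is only valid when the branch is taken, so caching it would poison unconditional code downstream; the new in_conditional_code() guard (and the matching assert added to LIRItem::load_item() above) prevents that. A standalone sketch of the hazard, not HotSpot code:

    #include <map>

    struct ConstantCache {
      bool in_conditional_code = false;
      std::map<long, int> reg_for_constant;   // constant value -> virtual register
      int next_reg = 0;

      int load_constant(long c) {
        auto it = reg_for_constant.find(c);
        if (it != reg_for_constant.end()) return it->second;  // reuse earlier materialization
        int reg = next_reg++;                  // emit: move c -> reg
        if (!in_conditional_code) {
          reg_for_constant[c] = reg;           // safe: this move dominates later uses
        }                                      // else: reg is live only on one branch
        return reg;
      }
    };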
*** 1522,1535 ****
  #endif
  
    if (x->needs_null_check() &&
        (needs_patching ||
         MacroAssembler::needs_explicit_null_check(x->offset()))) {
!     // Emit an explicit null check because the offset is too large.
!     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
!     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
!     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }
  
  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
--- 1545,1570 ----
  #endif
  
    if (x->needs_null_check() &&
        (needs_patching ||
         MacroAssembler::needs_explicit_null_check(x->offset()))) {
!     if (needs_patching && x->field()->is_flattenable()) {
!       // We are storing a field of type "QT;" into holder class H, but H is not yet
!       // loaded. (If H had been loaded, then T must also have already been loaded
!       // due to the "Q" signature, and needs_patching would be false).
!       assert(!x->field()->holder()->is_loaded(), "must be");
!       // We don't know the offset of this field. Let's deopt and recompile.
!       CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
!                                           Deoptimization::Reason_unloaded,
!                                           Deoptimization::Action_make_not_entrant);
!       __ branch(lir_cond_always, T_ILLEGAL, stub);
!     } else {
!       // Emit an explicit null check because the offset is too large.
!       // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
!       // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
!       __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
!     }
  }
  
  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
*** 1540,1555 ****
    access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
                    value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
  }
  
  void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
    assert(x->is_pinned(),"");
    bool needs_range_check = x->compute_needs_range_check();
    bool use_length = x->length() != NULL;
!   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
!   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                           !get_jobject_constant(x->value())->is_null_object() ||
                                           x->should_profile());
  
    LIRItem array(x->array(), this);
    LIRItem index(x->index(), this);
--- 1575,1720 ----
    access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
                    value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
  }
  
+ // FIXME -- I can't find any other way to pass an address to access_load_at().
+ class TempResolvedAddress: public Instruction {
+  public:
+   TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
+     set_operand(addr);
+   }
+   virtual void input_values_do(ValueVisitor*) {}
+   virtual void visit(InstructionVisitor* v) {}
+   virtual const char* name() const { return "TempResolvedAddress"; }
+ };
+ 
+ void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item) {
+   // Find the starting address of the source (inside the array)
+   ciType* array_type = array.value()->declared_type();
+   ciValueArrayKlass* value_array_klass = array_type->as_value_array_klass();
+   assert(value_array_klass->is_loaded(), "must be");
+ 
+   ciValueKlass* elem_klass = value_array_klass->element_klass()->as_value_klass();
+   int array_header_size = value_array_klass->array_header_in_bytes();
+   int shift = value_array_klass->log2_element_size();
+ 
+ #ifndef _LP64
+   LIR_Opr index_op = new_register(T_INT);
+   // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
+   // the top (shift+1) bits of index_op must be zero, or
+   // else throw ArrayIndexOutOfBoundsException
+   if (index.result()->is_constant()) {
+     jint const_index = index.result()->as_jint();
+     __ move(LIR_OprFact::intConst(const_index << shift), index_op);
+   } else {
+     __ shift_left(index_op, shift, index.result());
+   }
+ #else
+   LIR_Opr index_op = new_register(T_LONG);
+   if (index.result()->is_constant()) {
+     jint const_index = index.result()->as_jint();
+     __ move(LIR_OprFact::longConst(const_index << shift), index_op);
+   } else {
+     __ convert(Bytecodes::_i2l, index.result(), index_op);
+     // Need to shift manually, as LIR_Address can scale only up to 3.
+     __ shift_left(index_op, shift, index_op);
+   }
+ #endif
+ 
+   LIR_Opr elm_op = new_pointer_register();
+   LIR_Address* elm_address = new LIR_Address(array.result(), index_op, array_header_size, T_ADDRESS);
+   __ leal(LIR_OprFact::address(elm_address), elm_op);
+ 
+   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
+     ciField* inner_field = elem_klass->nonstatic_field_at(i);
+     assert(!inner_field->is_flattened(), "flattened fields must have been expanded");
+     int obj_offset = inner_field->offset();
+     int elm_offset = obj_offset - elem_klass->first_field_offset(); // object header is not stored in array.
+ 
+     BasicType field_type = inner_field->type()->basic_type();
+     switch (field_type) {
+     case T_BYTE:
+     case T_BOOLEAN:
+     case T_SHORT:
+     case T_CHAR:
+       field_type = T_INT;
+       break;
+     default:
+       break;
+     }
+ 
+     LIR_Opr temp = new_register(field_type);
+     TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
+     LIRItem elm_item(elm_resolved_addr, this);
+ 
+     DecoratorSet decorators = IN_HEAP;
+     if (is_load) {
+       access_load_at(decorators, field_type,
+                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
+                      NULL, NULL);
+       access_store_at(decorators, field_type,
+                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
+                       NULL, NULL);
+     } else {
+       access_load_at(decorators, field_type,
+                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
+                      NULL, NULL);
+       access_store_at(decorators, field_type,
+                       elm_item, LIR_OprFact::intConst(elm_offset), temp,
+                       NULL, NULL);
+     }
+   }
+ }
+ 
+ void LIRGenerator::check_flattened_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
+   LIR_Opr tmp = new_register(T_METADATA);
+   __ check_flattened_array(array, value, tmp, slow_path);
+ }
+ 
+ void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
+   LabelObj* L_end = new LabelObj();
+   LIR_Opr tmp = new_register(T_METADATA);
+   __ check_null_free_array(array.result(), tmp);
+   __ branch(lir_cond_equal, T_ILLEGAL, L_end->label());
+   __ null_check(value.result(), info);
+   __ branch_destination(L_end->label());
+ }
+ 
+ bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) {
+   if (ValueArrayFlatten && x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
+     ciType* type = x->value()->declared_type();
+     if (type != NULL && type->is_klass()) {
+       ciKlass* klass = type->as_klass();
+       if (klass->is_loaded() &&
+           !(klass->is_valuetype() && klass->as_value_klass()->flatten_array()) &&
+           !klass->is_java_lang_Object() &&
+           !klass->is_interface()) {
+         // This is known to be a non-flattenable object. If the array is flattened,
+         // it will be caught by the code generated by array_store_check().
+         return false;
+       }
+     }
+     // We're not 100% sure, so let's do the flattened_array_store_check.
+     return true;
+   }
+   return false;
+ }
+ 
+ bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
+   return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
+ }
+ 
  void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
    assert(x->is_pinned(),"");
+   assert(x->elt_type() != T_ARRAY, "never used");
+   bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
    bool needs_range_check = x->compute_needs_range_check();
    bool use_length = x->length() != NULL;
!   bool obj_store = x->elt_type() == T_OBJECT;
!   bool needs_store_check = obj_store && !(is_loaded_flattened_array && x->is_exact_flattened_array_store()) &&
!        (x->value()->as_Constant() == NULL ||
          !get_jobject_constant(x->value())->is_null_object() ||
          x->should_profile());
  
    LIRItem array(x->array(), this);
    LIRItem index(x->index(), this);
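Note: the address arithmetic in access_flattened_array() is the heart of flattened-array access: element i starts at base + array_header + (i << log2_element_size), and each field shifts down by first_field_offset because the per-object header is not replicated per element (on 32-bit, the shifted index can overflow an int, which is what the FIXME in the #ifndef _LP64 branch is about). A standalone sketch of the layout math, with an assumed payload size and not HotSpot's real layout code:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Assumed flattened layout: [array header | payload0 | payload1 | ...]
    int32_t load_elem_field(const uint8_t* array_base, size_t header_bytes,
                            unsigned log2_elem_size, size_t index,
                            size_t obj_offset, size_t first_field_offset) {
      const uint8_t* elem = array_base + header_bytes + (index << log2_elem_size);
      size_t elm_offset = obj_offset - first_field_offset;  // strip per-object header
      int32_t v;
      memcpy(&v, elem + elm_offset, sizeof(v));             // one field of element i
      return v;
    }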
*** 1560,1572 ****
    index.load_nonconstant();
  
    if (use_length && needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
-   }
  
!   if (needs_store_check || x->check_boolean()) {
      value.load_item();
    } else {
      value.load_for_store(x->elt_type());
    }
--- 1725,1738 ----
    index.load_nonconstant();
  
    if (use_length && needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
! 
!   if (needs_store_check || x->check_boolean()
!       || is_loaded_flattened_array || needs_flattened_array_store_check(x) || needs_null_free_array_store_check(x)) {
      value.load_item();
    } else {
      value.load_for_store(x->elt_type());
    }
*** 1595,1611 ****
    if (GenerateArrayStoreCheck && needs_store_check) {
      CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
      array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
    }
  
!   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
!   if (x->check_boolean()) {
!     decorators |= C1_MASK_BOOLEAN;
    }
- 
-   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
-                   NULL, null_check_info);
  }
  
  void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                    CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
--- 1761,1801 ----
    if (GenerateArrayStoreCheck && needs_store_check) {
      CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
      array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
    }
  
!   if (is_loaded_flattened_array) {
!     if (!x->value()->is_never_null()) {
!       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
!     }
!     access_flattened_array(false, array, index, value);
!   } else {
!     StoreFlattenedArrayStub* slow_path = NULL;
! 
!     if (needs_flattened_array_store_check(x)) {
!       // Check if we indeed have a flattened array
!       index.load_item();
!       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
!       check_flattened_array(array.result(), value.result(), slow_path);
!       set_in_conditional_code(true);
!     } else if (needs_null_free_array_store_check(x)) {
!       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
!       check_null_free_array(array, value, info);
!     }
! 
!     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
!     if (x->check_boolean()) {
!       decorators |= C1_MASK_BOOLEAN;
!     }
! 
!     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
!                     NULL, null_check_info);
!     if (slow_path != NULL) {
!       __ branch_destination(slow_path->continuation());
!       set_in_conditional_code(false);
!     }
    }
  }
  
  void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                    CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
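Note: the control-flow shape when the array is only possibly flattened is a runtime klass test that either falls through to the ordinary oop store or jumps to StoreFlattenedArrayStub, with both paths rejoining at the stub's continuation; set_in_conditional_code(true/false) brackets exactly the region where constants must not be cached. As straight C++ (illustrative types and helper, not HotSpot API):

    struct Obj { };
    struct Array { bool flattened; Obj* refs[16]; };

    static void store_via_runtime(Array*, int, Obj*) { /* runtime call: copy fields into the flat payload */ }

    void store_element(Array* a, int i, Obj* v) {
      if (a->flattened) {            // check_flattened_array -> slow_path branch
        store_via_runtime(a, i, v);  // StoreFlattenedArrayStub, out of line
      } else {
        a->refs[i] = v;              // access_store_at: inline oop store
      }
      // slow_path->continuation(): both paths rejoin here
    }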
*** 1691,1700 **** --- 1881,1964 ----
    }
    return _barrier_set->resolve(this, decorators, obj);
  }
  
+ Constant* LIRGenerator::flattenable_load_field_prolog(LoadField* x, CodeEmitInfo* info) {
+   ciField* field = x->field();
+   ciInstanceKlass* holder = field->holder();
+   Constant* default_value = NULL;
+ 
+   // Unloaded "QV;" klasses are represented by a ciInstanceKlass
+   bool field_type_unloaded = field->type()->is_instance_klass() && !field->type()->as_instance_klass()->is_loaded();
+ 
+   // Check for edge cases (1), (2) and (3) for getstatic and getfield
+   bool deopt = false;
+   bool need_default = false;
+   if (field->is_static()) {
+     // (1) holder is unloaded -- no problem: it will be loaded by patching, and field offset will be determined.
+     // No check needed here.
+ 
+     if (field_type_unloaded) {
+       // (2) field type is unloaded -- problem: we don't know what the default value is. Let's deopt.
+       // FIXME: consider getting the default value in patching code.
+       deopt = true;
+     } else {
+       need_default = true;
+     }
+ 
+     // (3) field is not flattened -- we don't care: static fields are never flattened.
+     // No check needed here.
+   } else {
+     if (!holder->is_loaded()) {
+       // (1) holder is unloaded -- problem: we needed the field offset back in GraphBuilder::access_field()
+       // FIXME: consider getting field offset in patching code (but only if the field
+       // type was loaded at compilation time).
+       deopt = true;
+     } else if (field_type_unloaded) {
+       // (2) field type is unloaded -- problem: we don't know whether it's flattened or not. Let's deopt
+       deopt = true;
+     } else if (!field->is_flattened()) {
+       // (3) field is not flattened -- need default value in cases of uninitialized field
+       need_default = true;
+     }
+   }
+ 
+   if (deopt) {
+     assert(!need_default, "deopt and need_default cannot both be true");
+     assert(x->needs_patching(), "must be");
+     assert(info != NULL, "must be");
+     CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
+                                         Deoptimization::Reason_unloaded,
+                                         Deoptimization::Action_make_not_entrant);
+     __ branch(lir_cond_always, T_ILLEGAL, stub);
+   } else if (need_default) {
+     assert(!field_type_unloaded, "must be");
+     assert(field->type()->is_valuetype(), "must be");
+     ciValueKlass* value_klass = field->type()->as_value_klass();
+     assert(value_klass->is_loaded(), "must be");
+ 
+     if (field->is_static() && holder->is_loaded()) {
+       ciInstance* mirror = field->holder()->java_mirror();
+       ciObject* val = mirror->field_value(field).as_object();
+       if (val->is_null_object()) {
+         // This is a non-nullable static field, but it's not initialized.
+         // We need to do a null check, and replace it with the default value.
+       } else {
+         // No need to perform null check on this static field
+         need_default = false;
+       }
+     }
+ 
+     if (need_default) {
+       default_value = new Constant(new InstanceConstant(value_klass->default_value_instance()));
+     }
+   }
+ 
+   return default_value;
+ }
+ 
  void LIRGenerator::do_LoadField(LoadField* x) {
    bool needs_patching = x->needs_patching();
    bool is_volatile = x->field()->is_volatile();
    BasicType field_type = x->field_type();
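Note: the three getfield/getstatic edge cases above can be restated as a standalone decision function (same (1)-(3) numbering as the comments; illustrative only):

    enum Action { PROCEED, DEOPT, NEED_DEFAULT };

    Action classify_flattenable_load(bool is_static, bool holder_loaded,
                                     bool field_type_loaded, bool field_flattened) {
      if (is_static) {
        // (1) unloaded holder is fine: patching resolves the offset later.
        if (!field_type_loaded) return DEOPT;  // (2) default value unknown
        return NEED_DEFAULT;                   // (3) statics are never flattened
      }
      if (!holder_loaded)     return DEOPT;         // (1) field offset unknown
      if (!field_type_loaded) return DEOPT;         // (2) flattened-ness unknown
      if (!field_flattened)   return NEED_DEFAULT;  // (3) may read an uninitialized null
      return PROCEED;
    }

NEED_DEFAULT can still be dropped at compile time for an already-initialized static, which is what the java_mirror() check above does.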
*** 1720,1729 **** --- 1984,1998 ----
      tty->print_cr("   ###class not loaded at load_%s bci %d",
                    x->is_static() ? "static" : "field", x->printable_bci());
    }
  #endif
  
+   Constant* default_value = NULL;
+   if (x->field()->is_flattenable()) {
+     default_value = flattenable_load_field_prolog(x, info);
+   }
+ 
    bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
    if (x->needs_null_check() &&
        (needs_patching ||
         MacroAssembler::needs_explicit_null_check(x->offset()) ||
         stress_deopt)) {
*** 1748,1757 **** --- 2017,2036 ----
    LIR_Opr result = rlock_result(x, field_type);
    access_load_at(decorators, field_type,
                   object, LIR_OprFact::intConst(x->offset()), result,
                   info ? new CodeEmitInfo(info) : NULL, info);
+ 
+   if (default_value != NULL) {
+     LabelObj* L_end = new LabelObj();
+     __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL));
+     __ branch(lir_cond_notEqual, T_OBJECT, L_end->label());
+     set_in_conditional_code(true);
+     __ move(load_constant(default_value), result);
+     __ branch_destination(L_end->label());
+     set_in_conditional_code(false);
+   }
  }
  
  //------------------------java.nio.Buffer.checkIndex------------------------
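Note: the emitted LIR amounts to a null-coalescing load -- if the heap slot still holds the zeroed (null) reference, the never-null flattenable field reads back as the value class's default instance. In plain C++ (hypothetical Instance type):

    struct Instance;

    Instance* load_flattenable_field(Instance* const* slot, Instance* default_instance) {
      Instance* v = *slot;                           // access_load_at
      return (v != nullptr) ? v : default_instance;  // cmp/branch to L_end, else move default
    }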
*** 1862,1879 ****
        // The range check performs the null check, so clear it out for the load
        null_check_info = NULL;
      }
    }
  
!   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
! 
!   LIR_Opr result = rlock_result(x, x->elt_type());
!   access_load_at(decorators, x->elt_type(),
!                  array, index.result(), result,
!                  NULL, null_check_info);
  }
  
  void LIRGenerator::do_NullCheck(NullCheck* x) {
    if (x->can_trap()) {
      LIRItem value(x->obj(), this);
      value.load_item();
--- 2141,2205 ----
        // The range check performs the null check, so clear it out for the load
        null_check_info = NULL;
      }
    }
  
!   if (x->vt() != NULL) {
!     assert(x->array()->is_loaded_flattened_array(), "must be");
!     // Find the destination address (of the NewValueTypeInstance).
!     LIR_Opr obj = x->vt()->operand();
!     LIRItem obj_item(x->vt(), this);
! 
!     access_flattened_array(true, array, index, obj_item);
!     set_no_result(x);
!   } else {
!     LIR_Opr result = rlock_result(x, x->elt_type());
!     LoadFlattenedArrayStub* slow_path = NULL;
! 
!     if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
!       index.load_item();
!       // if we are loading from flattened array, load it using a runtime call
!       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
!       check_flattened_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
!       set_in_conditional_code(true);
!     }
! 
!     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
!     access_load_at(decorators, x->elt_type(),
!                    array, index.result(), result,
!                    NULL, null_check_info);
! 
!     if (slow_path != NULL) {
!       __ branch_destination(slow_path->continuation());
!       set_in_conditional_code(false);
!     }
!   }
  }
  
+ void LIRGenerator::do_WithField(WithField* x) {
+   // This happens only when a class X uses the withfield bytecode to refer to
+   // an inline class V, where V has not yet been loaded. This is not a common
+   // case. Let's just deoptimize.
+   CodeEmitInfo* info = state_for(x, x->state_before());
+   CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
+                                       Deoptimization::Reason_unloaded,
+                                       Deoptimization::Action_make_not_entrant);
+   __ branch(lir_cond_always, T_ILLEGAL, stub);
+   LIR_Opr reg = rlock_result(x, T_OBJECT);
+   __ move(LIR_OprFact::oopConst(NULL), reg);
+ }
+ 
+ void LIRGenerator::do_DefaultValue(DefaultValue* x) {
+   // Same as withfield above. Let's deoptimize.
+   CodeEmitInfo* info = state_for(x, x->state_before());
+   CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
+                                       Deoptimization::Reason_unloaded,
+                                       Deoptimization::Action_make_not_entrant);
+   __ branch(lir_cond_always, T_ILLEGAL, stub);
+   LIR_Opr reg = rlock_result(x, T_OBJECT);
+   __ move(LIR_OprFact::oopConst(NULL), reg);
+ }
+ 
  void LIRGenerator::do_NullCheck(NullCheck* x) {
    if (x->can_trap()) {
      LIRItem value(x->obj(), this);
      value.load_item();
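Note: the load side mirrors the store sketch after the do_StoreIndexed hunk. A known-flattened array with a pre-allocated destination (x->vt()) is copied field-by-field inline, while a maybe-flattened array gets a runtime test dispatching to LoadFlattenedArrayStub, which (as I read the stub's role) materializes the element as a heap object; both paths rejoin at the continuation. Illustrative shape only:

    struct Obj { };
    struct Array { bool flattened; Obj* refs[16]; };

    static Obj* load_via_runtime(Array*, int) { return nullptr; /* runtime call: allocate + copy fields */ }

    Obj* load_element(Array* a, int i) {
      Obj* result;
      if (a->flattened) {                 // check_flattened_array -> slow_path branch
        result = load_via_runtime(a, i);  // LoadFlattenedArrayStub
      } else {
        result = a->refs[i];              // access_load_at: inline oop load
      }
      return result;                      // slow_path->continuation(): paths rejoin
    }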
*** 2715,2745 ****
    __ osr_entry(LIR_Assembler::osrBufferPointer());
    LIR_Opr result = rlock_result(x);
    __ move(LIR_Assembler::osrBufferPointer(), result);
  }
  
  void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
    assert(args->length() == arg_list->length(),
           "args=%d, arg_list=%d", args->length(), arg_list->length());
    for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
      LIRItem* param = args->at(i);
      LIR_Opr loc = arg_list->at(i);
!     if (loc->is_register()) {
!       param->load_item_force(loc);
!     } else {
!       LIR_Address* addr = loc->as_address_ptr();
!       param->load_for_store(addr->type());
!       if (addr->type() == T_OBJECT) {
!         __ move_wide(param->result(), addr);
!       } else
!         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
!           __ unaligned_move(param->result(), addr);
!         } else {
!           __ move(param->result(), addr);
!         }
!     }
    }
  
    if (x->has_receiver()) {
      LIRItem* receiver = args->at(0);
      LIR_Opr loc = arg_list->at(0);
--- 3041,3076 ----
    __ osr_entry(LIR_Assembler::osrBufferPointer());
    LIR_Opr result = rlock_result(x);
    __ move(LIR_Assembler::osrBufferPointer(), result);
  }
  
+ void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
+   if (loc->is_register()) {
+     param->load_item_force(loc);
+   } else {
+     LIR_Address* addr = loc->as_address_ptr();
+     param->load_for_store(addr->type());
+     assert(addr->type() != T_VALUETYPE, "not supported yet");
+     if (addr->type() == T_OBJECT) {
+       __ move_wide(param->result(), addr);
+     } else {
+       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
+         __ unaligned_move(param->result(), addr);
+       } else {
+         __ move(param->result(), addr);
+       }
+     }
+   }
+ }
+ 
  void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
    assert(args->length() == arg_list->length(),
           "args=%d, arg_list=%d", args->length(), arg_list->length());
    for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
      LIRItem* param = args->at(i);
      LIR_Opr loc = arg_list->at(i);
!     invoke_load_one_argument(param, loc);
    }
  
    if (x->has_receiver()) {
      LIRItem* receiver = args->at(0);
      LIR_Opr loc = arg_list->at(0);
*** 2916,2939 ****
  #endif
  
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
!   if (can_inline_as_constant(right.value())) {
      right.dont_load_item();
    } else {
      right.load_item();
    }
  
    LIRItem t_val(x->tval(), this);
    LIRItem f_val(x->fval(), this);
    t_val.dont_load_item();
    f_val.dont_load_item();
-   LIR_Opr reg = rlock_result(x);
  
!   __ cmp(lir_cond(x->cond()), left.result(), right.result());
!   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
  }
  
  #ifdef JFR_HAVE_INTRINSICS
  void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
    CodeEmitInfo* info = state_for(x);
--- 3247,3328 ----
  #endif
  
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
!   if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
      right.dont_load_item();
    } else {
+     // substitutability_check() needs to use right as a base register.
      right.load_item();
    }
  
    LIRItem t_val(x->tval(), this);
    LIRItem f_val(x->fval(), this);
    t_val.dont_load_item();
    f_val.dont_load_item();
  
!   if (x->substitutability_check()) {
!     substitutability_check(x, left, right, t_val, f_val);
!   } else {
!     LIR_Opr reg = rlock_result(x);
!     __ cmp(lir_cond(x->cond()), left.result(), right.result());
!     __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
!   }
! }
! 
! void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
!   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
!   bool is_acmpeq = (x->cond() == If::eql);
!   LIR_Opr equal_result     = is_acmpeq ? t_val.result() : f_val.result();
!   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
!   LIR_Opr result = rlock_result(x);
!   CodeEmitInfo* info = state_for(x, x->state_before());
! 
!   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
! }
! 
! void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
!   LIR_Opr equal_result = LIR_OprFact::intConst(1);
!   LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
!   LIR_Opr result = new_register(T_INT);
!   CodeEmitInfo* info = state_for(x, x->state_before());
! 
!   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
! 
!   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
!   __ cmp(lir_cond(x->cond()), result, equal_result);
! }
! 
! void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
!                                                  LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
!                                                  CodeEmitInfo* info) {
!   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
!   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
!   LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
!   LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;
! 
!   ciKlass* left_klass  = left_val ->as_loaded_klass_or_null();
!   ciKlass* right_klass = right_val->as_loaded_klass_or_null();
! 
!   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
!       !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
!     init_temps_for_substitutability_check(tmp1, tmp2);
!   }
! 
!   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
!     // No need to load klass -- the operands are statically known to be the same value klass.
!   } else {
!     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
!     left_klass_op  = new_register(t_klass);
!     right_klass_op = new_register(t_klass);
!   }
! 
!   CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
!   __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
!                             tmp1, tmp2,
!                             left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
  }
  
  #ifdef JFR_HAVE_INTRINSICS
  void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
    CodeEmitInfo* info = state_for(x);
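Note: substitutability_check compiles the Valhalla semantics of acmp -- two value objects are == when they are substitutable (same value class and field-wise equal), while identity objects keep plain reference comparison. The inline fast path covers reference equality, nulls, and mismatched klasses; SubstitutabilityCheckStub does the field-wise walk out of line. A simplified standalone model, where memcmp stands in for the recursive field comparison (which it is not, for oops, floats, or padding):

    #include <cstring>

    struct ValueObj {
      const void* klass;          // value class identity
      unsigned char payload[8];   // flattened field storage (assumed layout)
    };

    bool acmpeq(const ValueObj* a, const ValueObj* b, bool both_value_types) {
      if (a == b) return true;                  // reference-equal (covers null == null)
      if (!both_value_types) return false;      // identity objects: pure reference compare
      if (a == nullptr || b == nullptr) return false;
      if (a->klass != b->klass) return false;   // different value classes: not substitutable
      // Out-of-line "slow path": compare field-wise (simplified here to raw bytes).
      return memcmp(a->payload, b->payload, sizeof(a->payload)) == 0;
    }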