 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodCounters.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
  }

  _result = opr;
}

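// Load the item's value into a register. For constants only _result is
// updated; the instruction keeps its constant operand, so other uses can
// still treat it as a constant.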
void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


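// Load the value only as far as a store of the given type requires: values
// that the platform can store directly as constants are left unloaded.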
void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
  case Bytecodes::_iand:
  case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

  case Bytecodes::_ior:
  case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op);  break;

  case Bytecodes::_ixor:
  case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

  default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


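// Note the operand shuffle below: the incoming `lock` becomes the displaced
// header copy (`hdr`), while `new_hdr` becomes the lock address loaded from
// the stack monitor slot.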
void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
    tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

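// Fast-path allocation is attempted only for a loaded klass whose layout
// helper says no slow path is required (e.g. no finalizer); everything else
// branches straight to the Runtime1 allocation stub.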
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If the klass is not loaded, we do not know whether it has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() > 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
      if (src_type != nullptr) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == nullptr) expected_type = dst_exact_type;
    if (expected_type == nullptr) expected_type = src_declared_type;
    if (expected_type == nullptr) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != nullptr) {
    Value length_limit = nullptr;

    IfOp* ifop = length->as_IfOp();
    if (ifop != nullptr) {
      // look for expressions like min(v, a.length) which end up as
      // x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      default:
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

//------------------------field access--------------------------------------

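// Lowers the four-argument compare-and-swap intrinsics (e.g. the Unsafe
// compareAndSet family): the field at (obj, offset) is replaced with `val`
// only if it currently equals `cmp`.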
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");
  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
                                            obj, offset, cmp, val);
  set_result(x, result);
}

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is null, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
                  value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
}

void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != nullptr;
  bool obj_store = is_reference_type(x->elt_type());
  bool needs_store_check = obj_store && (x->value()->as_Constant() == nullptr ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = nullptr;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  if (needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = nullptr;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
  }

  DecoratorSet decorators = IN_HEAP | IS_ARRAY;
  if (x->check_boolean()) {
    decorators |= C1_MASK_BOOLEAN;
  }

  access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
                  nullptr, null_check_info);
}

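// The access_* helpers below dispatch an access either through the GC's
// barrier set (which may expand read/write barriers) or, for raw accesses,
// directly through the shared BarrierSetC1 implementation.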
void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                  CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load_at(access, result);
  } else {
    _barrier_set->load_at(access, result);
  }
}

void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
                               LIR_Opr addr, LIR_Opr result) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
  access.set_resolved_addr(addr);
  if (access.is_raw()) {
      __ move(LIR_OprFact::oopConst(nullptr), obj);
    }
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is null, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  LIR_Opr result = rlock_result(x, field_type);
  access_load_at(decorators, field_type,
                 object, LIR_OprFact::intConst(x->offset()), result,
                 info ? new CodeEmitInfo(info) : nullptr, info);
}

// int/long jdk.internal.util.Preconditions.checkIndex
void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIRItem index(x->argument_at(0), this);
  LIRItem length(x->argument_at(1), this);
  LIRItem oobef(x->argument_at(2), this);

  index.load_item();
  length.load_item();
  oobef.load_item();

  LIR_Opr result = rlock_result(x);
  // x->state() is created from copy_state_for_exception and does not contain the arguments;
  // we must prepare them before entering the interpreter in case of deoptimization.
  ValueStack* state = x->state();
  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value arg = x->argument_at(i);
    state->push(arg->type(), arg);
      __ move(LIR_OprFact::oopConst(nullptr), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  if (needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = nullptr;
    }
  }

  DecoratorSet decorators = IN_HEAP | IS_ARRAY;

  LIR_Opr result = rlock_result(x, x->elt_type());
  access_load_at(decorators, x->elt_type(),
                 array, index.result(), result,
                 nullptr, null_check_info);
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting
  set_result(x, value.result());
}

  Compilation* comp = Compilation::current();
  if (do_update) {
    // try to find exact type, using CHA if possible, so that loading
    // the klass from the object can be avoided
    ciType* type = obj->exact_type();
    if (type == nullptr) {
      type = obj->declared_type();
      type = comp->cha_exact_type(type);
    }
    assert(type == nullptr || type->is_klass(), "type should be class");
    exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;

    do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }

  if (!do_null && !do_update) {
    return result;
  }

2541
2542 ciKlass* exact_signature_k = nullptr;
2543 if (do_update) {
2544 // Is the type from the signature exact (the only one possible)?
2545 exact_signature_k = signature_at_call_k->exact_klass();
2546 if (exact_signature_k == nullptr) {
2547 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2548 } else {
2549 result = exact_signature_k;
2550 // Known statically. No need to emit any code: prevent
2551 // LIR_Assembler::emit_profile_type() from emitting useless code
2552 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2553 }
2554 // exact_klass and exact_signature_k can be both non null but
2555 // different if exact_klass is loaded after the ciObject for
2556 // exact_signature_k is created.
2557 if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2558 // sometimes the type of the signature is better than the best type
2559 // the compiler has
2560 exact_klass = exact_signature_k;
2561 }
2562 if (callee_signature_k != nullptr &&
2563 callee_signature_k != signature_at_call_k) {
        assert(!src->is_illegal(), "check");
        BasicType t = src->type();
        if (is_reference_type(t)) {
          intptr_t profiled_k = parameters->type(j);
          Local* local = x->state()->local_at(java_index)->as_Local();
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
          // If the profile is known statically, set it once and for all and do not emit any code
          if (exact != nullptr) {
            md->set_parameter_type(j, exact);
          }
          j++;
        }
        java_index += type2size[t];
      }
    }
  }
}

void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type that was passed.
    switch (t) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      t = T_INT;
      break;
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = syncLockOpr();
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-null so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
    }
  }
  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
    profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
    increment_invocation_counter(info);
  }

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}


void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of the incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}


void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  assert(args->length() == arg_list->length(),
         "args=%d, arg_list=%d", args->length(), arg_list->length());
  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_OBJECT) {
        __ move_wide(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}


// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
  __ move(tmp, reg);
}


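// For example, Java's `int m = a > b ? b : a` reaches this point as an IfOp
// and lowers to a compare followed by a conditional move.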
// Code for :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }

  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();
  LIR_Opr reg = rlock_result(x);

  __ cmp(lir_cond(x->cond()), left.result(), right.result());
  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
}

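// Emits a leaf call into the VM for a zero-argument intrinsic; the fixed
// platform result register is then copied into a fresh virtual register.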
void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  // Enforce computation of _reserved_argument_area_size which is required on some platforms.
  BasicTypeList signature;
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime_leaf(routine, getThreadTemp(),
                       reg, new LIR_OprList());
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat      :
  case vmIntrinsics::_doubleToRawLongBits :
  if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }

  if (x->recv() != nullptr) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  if (data != nullptr) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = nullptr;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
    // to be used in load/store instructions on some platforms. So have
    // profile_type() compute the address of the profile in a register.
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
                                  ret->type(), x->ret(), mdp,
                                  !x->needs_null_check(),
                                  signature_at_call->return_type()->as_klass(),
                                  x->callee()->signature()->return_type()->as_klass());
    if (exact != nullptr) {
      md->set_return_type(bci, exact);
    }
  }
}

void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since C2 will inline them anyway;
  // accessors are also always mature.
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime very infrequently only to take care of counter overflows
    int freq_log = Tier23InlineeNotifyFreqLog;
    double scale;
    if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
      freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
    }
    increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
  }
}

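// right_n_bits(freq_log) above yields a mask of the freq_log low bits, so the
// runtime is notified roughly every 2^freq_log counter increments.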
void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
  if (compilation()->is_profiling()) {
#if defined(X86) && !defined(_LP64)
    // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
    LIR_Opr left_copy = new_register(left->type());
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodCounters.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
  }

  _result = opr;
}

void LIRItem::load_item() {
  assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");

  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
  case Bytecodes::_iand:
  case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

  case Bytecodes::_ior:
  case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op);  break;

  case Bytecodes::_ixor:
  case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

  default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
                                 CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
    tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  if (allow_inline) {
    assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
    __ metadata2reg(klass->constant_encoding(), klass_reg);
  } else {
    klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  }
  // If the klass is not loaded, we do not know whether it has finalizers or is an unexpected inline klass
  if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() > 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ jump(slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
      if (src_type != nullptr) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == nullptr) expected_type = dst_exact_type;
    if (expected_type == nullptr) expected_type = src_declared_type;
    if (expected_type == nullptr) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
    flags &= ~LIR_OpArrayCopy::always_slow_path;
  }
  if (!src->maybe_flat_array()) {
    flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
  }
  if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
    flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
  }

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != nullptr) {
    Value length_limit = nullptr;

    IfOp* ifop = length->as_IfOp();
    if (ifop != nullptr) {
      // look for expressions like min(v, a.length) which end up as
      // x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      default:
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  if (!in_conditional_code()) {
    _constants.append(c);
    _reg_for_constants.append(result);
  }
  return result;
}

void LIRGenerator::set_in_conditional_code(bool v) {
  assert(v != _in_conditional_code, "must change state");
  _in_conditional_code = v;
}


//------------------------field access--------------------------------------

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");
  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
                                            obj, offset, cmp, val);
  set_result(x, result);
}

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is null, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
                  value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
}

// FIXME -- I can't find any other way to pass an address to access_load_at().
class TempResolvedAddress: public Instruction {
 public:
  TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
    set_operand(addr);
  }
  virtual void input_values_do(ValueVisitor*) {}
  virtual void visit(InstructionVisitor* v)   {}
  virtual const char* name() const { return "TempResolvedAddress"; }
};

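// Computes the address of element `index` of a flat array, i.e.
// array_base + array_header_in_bytes + (index << log2_element_size), and
// returns it in a pointer register. For example, with a 16-byte header and
// 8-byte elements, element 2 sits at array + 16 + (2 << 3) = array + 32.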
LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
  ciType* array_type = array.value()->declared_type();
  ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
  assert(flat_array_klass->is_loaded(), "must be");

  int array_header_size = flat_array_klass->array_header_in_bytes();
  int shift = flat_array_klass->log2_element_size();

#ifndef _LP64
  LIR_Opr index_op = new_register(T_INT);
  // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
  // the top (shift+1) bits of index_op must be zero, or
  // else throw ArrayIndexOutOfBoundsException
  if (index.result()->is_constant()) {
    jint const_index = index.result()->as_jint();
    __ move(LIR_OprFact::intConst(const_index << shift), index_op);
  } else {
    __ shift_left(index_op, shift, index.result());
  }
#else
  LIR_Opr index_op = new_register(T_LONG);
  if (index.result()->is_constant()) {
    jint const_index = index.result()->as_jint();
    __ move(LIR_OprFact::longConst(const_index << shift), index_op);
  } else {
    __ convert(Bytecodes::_i2l, index.result(), index_op);
    // Need to shift manually, as LIR_Address can scale only up to 3.
    __ shift_left(index_op, shift, index_op);
  }
#endif

  LIR_Opr elm_op = new_pointer_register();
  LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
  __ leal(LIR_OprFact::address(elm_address), elm_op);
  return elm_op;
}

void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, int sub_offset) {
  assert(field != nullptr, "Need a subelement type specified");

  // Find the starting address of the source (inside the array)
  LIR_Opr elm_op = get_and_load_element_address(array, index);

  BasicType subelt_type = field->type()->basic_type();
  TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
  LIRItem elm_item(elm_resolved_addr, this);

  DecoratorSet decorators = IN_HEAP;
  access_load_at(decorators, subelt_type,
                 elm_item, LIR_OprFact::intConst(sub_offset), result,
                 nullptr, nullptr);

  if (field->is_null_free()) {
    assert(field->type()->is_loaded(), "Must be");
    assert(field->type()->is_inlinetype(), "Must be if loaded");
    assert(field->type()->as_inline_klass()->is_initialized(), "Must be");
    LabelObj* L_end = new LabelObj();
    __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
    __ branch(lir_cond_notEqual, L_end->label());
    set_in_conditional_code(true);
    Constant* default_value = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
    if (default_value->is_pinned()) {
      __ move(LIR_OprFact::value_type(default_value->type()), result);
    } else {
      __ move(load_constant(default_value), result);
    }
    __ branch_destination(L_end->label());
    set_in_conditional_code(false);
  }
}

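// Copies one flat-array element field by field: with is_load == true the
// element is copied out of the array into the buffered object (obj_item);
// with is_load == false the object's fields are copied into the array slot.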
void LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
                                     ciField* field, int sub_offset) {
  assert(sub_offset == 0 || field != nullptr, "Sanity check");

  // Find the starting address of the source (inside the array)
  LIR_Opr elm_op = get_and_load_element_address(array, index);

  ciInlineKlass* elem_klass = nullptr;
  if (field != nullptr) {
    elem_klass = field->type()->as_inline_klass();
  } else {
    elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
  }
  for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
    ciField* inner_field = elem_klass->nonstatic_field_at(i);
    assert(!inner_field->is_flat(), "flat fields must have been expanded");
    int obj_offset = inner_field->offset_in_bytes();
    int elm_offset = obj_offset - elem_klass->first_field_offset() + sub_offset; // object header is not stored in array.
    BasicType field_type = inner_field->type()->basic_type();

    // Types which are smaller than int are still passed in an int register.
    BasicType reg_type = field_type;
    switch (reg_type) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      reg_type = T_INT;
      break;
    default:
      break;
    }

    LIR_Opr temp = new_register(reg_type);
    TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
    LIRItem elm_item(elm_resolved_addr, this);

    DecoratorSet decorators = IN_HEAP;
    if (is_load) {
      access_load_at(decorators, field_type,
                     elm_item, LIR_OprFact::intConst(elm_offset), temp,
                     nullptr, nullptr);
      access_store_at(decorators, field_type,
                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
                      nullptr, nullptr);
    } else {
      access_load_at(decorators, field_type,
                     obj_item, LIR_OprFact::intConst(obj_offset), temp,
                     nullptr, nullptr);
      access_store_at(decorators, field_type,
                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
                      nullptr, nullptr);
    }
  }
}

void LIRGenerator::check_flat_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
  LIR_Opr tmp = new_register(T_METADATA);
  __ check_flat_array(array, value, tmp, slow_path);
}

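// A null store into a null-free array must throw NullPointerException; the
// branch below skips the value null check when the array does not require it.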
void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
  LabelObj* L_end = new LabelObj();
  LIR_Opr tmp = new_register(T_METADATA);
  __ check_null_free_array(array.result(), tmp);
  __ branch(lir_cond_equal, L_end->label());
  __ null_check(value.result(), info);
  __ branch_destination(L_end->label());
}

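// A flat-array store check can be skipped only when the value is statically
// known not to be an inline object that could be stored flat in an array.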
bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
  if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
    ciType* type = x->value()->declared_type();
    if (type != nullptr && type->is_klass()) {
      ciKlass* klass = type->as_klass();
      if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->flat_in_array())) {
        // This is known to be a non-flat object. If the array is a flat array,
        // it will be caught by the code generated by array_store_check().
        return false;
      }
    }
    // We're not 100% sure, so let's do the flat_array_store_check.
    return true;
  }
  return false;
}

bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
  return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
}

void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  assert(x->elt_type() != T_ARRAY, "never used");
  bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != nullptr;
  bool obj_store = is_reference_type(x->elt_type());
  bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
                           (x->value()->as_Constant() == nullptr ||
                            !get_jobject_constant(x->value())->is_null_object());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }

  if (needs_store_check || x->check_boolean()
      || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = nullptr;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  if (needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = nullptr;
    }
  }

  if (x->should_profile()) {
    if (x->array()->is_loaded_flat_array()) {
      // No need to profile a store to a flat array of known type. This can happen if
      // the type only became known after optimizations (for example, after the PhiSimplifier).
      x->set_should_profile(false);
    } else {
      int bci = x->profiled_bci();
      ciMethodData* md = x->profiled_method()->method_data();
      assert(md != nullptr, "Sanity");
      ciProfileData* data = md->bci_to_data(bci);
      assert(data != nullptr && data->is_ArrayStoreData(), "incorrect profiling entry");
      ciArrayStoreData* store_data = (ciArrayStoreData*)data;
      profile_array_type(x, md, store_data);
      assert(store_data->is_ArrayStoreData(), "incorrect profiling entry");
      if (x->array()->maybe_null_free_array()) {
        profile_null_free_array(array, md, store_data);
      }
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (is_loaded_flat_array) {
    if (!x->value()->is_null_free()) {
      __ null_check(value.result(), new CodeEmitInfo(range_check_info));
    }
    // If the array element is an empty inline type, no need to copy anything
    if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
      access_flat_array(false, array, index, value);
    }
  } else {
    StoreFlattenedArrayStub* slow_path = nullptr;

    if (needs_flat_array_store_check(x)) {
      // Check if we indeed have a flat array
      index.load_item();
      slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
      check_flat_array(array.result(), value.result(), slow_path);
      set_in_conditional_code(true);
    } else if (needs_null_free_array_store_check(x)) {
      CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
      check_null_free_array(array, value, info);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (x->check_boolean()) {
      decorators |= C1_MASK_BOOLEAN;
    }

    access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
                    nullptr, null_check_info);
    if (slow_path != nullptr) {
      __ branch_destination(slow_path->continuation());
      set_in_conditional_code(false);
    }
  }
}

void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                  CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load_at(access, result);
  } else {
    _barrier_set->load_at(access, result);
  }
}

void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
                               LIR_Opr addr, LIR_Opr result) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
  access.set_resolved_addr(addr);
  if (access.is_raw()) {
      __ move(LIR_OprFact::oopConst(nullptr), obj);
    }
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is null, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  LIR_Opr result = rlock_result(x, field_type);
  access_load_at(decorators, field_type,
                 object, LIR_OprFact::intConst(x->offset()), result,
                 info ? new CodeEmitInfo(info) : nullptr, info);

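  // The remainder of this function handles null-free inline type fields: a
  // null read means "uninitialized" and is replaced by the default instance,
  // or triggers deoptimization when the inline klass is not initialized yet.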
  ciField* field = x->field();
  if (field->is_null_free()) {
    // A load from a non-flat inline type field requires
    // a null check to replace null with the default value.
    ciInstanceKlass* holder = field->holder();
    if (field->is_static() && holder->is_loaded()) {
      ciObject* val = holder->java_mirror()->field_value(field).as_object();
      if (!val->is_null_object()) {
        // Static field is initialized, we don't need to perform a null check.
        return;
      }
    }
    ciInlineKlass* inline_klass = field->type()->as_inline_klass();
    if (inline_klass->is_initialized()) {
      LabelObj* L_end = new LabelObj();
      __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
      __ branch(lir_cond_notEqual, L_end->label());
      set_in_conditional_code(true);
      Constant* default_value = new Constant(new InstanceConstant(inline_klass->default_instance()));
      if (default_value->is_pinned()) {
        __ move(LIR_OprFact::value_type(default_value->type()), result);
      } else {
        __ move(load_constant(default_value), result);
      }
      __ branch_destination(L_end->label());
      set_in_conditional_code(false);
    } else {
      info = state_for(x, x->state_before());
      __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(nullptr));
      __ branch(lir_cond_equal, new DeoptimizeStub(info, Deoptimization::Reason_uninitialized,
                                                   Deoptimization::Action_make_not_entrant));
    }
  }
}

// int/long jdk.internal.util.Preconditions.checkIndex
void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIRItem index(x->argument_at(0), this);
  LIRItem length(x->argument_at(1), this);
  LIRItem oobef(x->argument_at(2), this);

  index.load_item();
  length.load_item();
  oobef.load_item();

  LIR_Opr result = rlock_result(x);
  // x->state() is created from copy_state_for_exception and does not contain the arguments;
  // we must prepare them before entering the interpreter in case of deoptimization.
  ValueStack* state = x->state();
  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value arg = x->argument_at(i);
    state->push(arg->type(), arg);
      __ move(LIR_OprFact::oopConst(nullptr), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  if (needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = nullptr;
    }
  }

  ciMethodData* md = nullptr;
  ciArrayLoadData* load_data = nullptr;
  if (x->should_profile()) {
    if (x->array()->is_loaded_flat_array()) {
      // No need to profile a load from a flat array of known type. This can happen if
      // the type only became known after optimizations (for example, after the PhiSimplifier).
      x->set_should_profile(false);
    } else {
      int bci = x->profiled_bci();
      md = x->profiled_method()->method_data();
      assert(md != nullptr, "Sanity");
      ciProfileData* data = md->bci_to_data(bci);
      assert(data != nullptr && data->is_ArrayLoadData(), "incorrect profiling entry");
      load_data = (ciArrayLoadData*)data;
      profile_array_type(x, md, load_data);
    }
  }

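  // Four cases follow: (1) copy a flat element into a buffered instance
  // (x->vt() != nullptr), (2) load a single sub-element of a flat element
  // directly, (3) fold a load of an empty inline element to the default
  // instance, and (4) the generic load, with a runtime slow path when the
  // array might be flat.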
  Value element;
  if (x->vt() != nullptr) {
    assert(x->array()->is_loaded_flat_array(), "must be");
    // Find the destination address (of the NewInlineTypeInstance).
    LIRItem obj_item(x->vt(), this);

    access_flat_array(true, array, index, obj_item,
                      x->delayed() == nullptr ? 0 : x->delayed()->field(),
                      x->delayed() == nullptr ? 0 : x->delayed()->offset());
    set_no_result(x);
  } else if (x->delayed() != nullptr) {
    assert(x->array()->is_loaded_flat_array(), "must be");
    LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
    access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
  } else if (x->array() != nullptr && x->array()->is_loaded_flat_array() &&
             x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_initialized() &&
             x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
    // Load the default instance instead of reading the element
    ciInlineKlass* elem_klass = x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
    LIR_Opr result = rlock_result(x, x->elt_type());
    assert(elem_klass->is_initialized(), "Must be");
    Constant* default_value = new Constant(new InstanceConstant(elem_klass->default_instance()));
    if (default_value->is_pinned()) {
      __ move(LIR_OprFact::value_type(default_value->type()), result);
    } else {
      __ move(load_constant(default_value), result);
    }
  } else {
    LIR_Opr result = rlock_result(x, x->elt_type());
    LoadFlattenedArrayStub* slow_path = nullptr;

    if (x->should_profile() && x->array()->maybe_null_free_array()) {
      profile_null_free_array(array, md, load_data);
    }

    if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
      assert(x->delayed() == nullptr, "delayed LoadIndexed only applies to loaded flat arrays");
      index.load_item();
      // If we are loading from a flat array, load it using a runtime call
      slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
      check_flat_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
      set_in_conditional_code(true);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    access_load_at(decorators, x->elt_type(),
                   array, index.result(), result,
                   nullptr, null_check_info);

    if (slow_path != nullptr) {
      __ branch_destination(slow_path->continuation());
      set_in_conditional_code(false);
    }

    element = x;
  }

  if (x->should_profile()) {
    profile_element_type(element, md, load_data);
  }
}


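// An explicit NullCheck node only emits code when it can trap; if the value
// is already known to be non-null the node is a no-op at this point.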
void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting
  set_result(x, value.result());
}

// ...
  Compilation* comp = Compilation::current();
  if (do_update) {
    // try to find exact type, using CHA if possible, so that loading
    // the klass from the object can be avoided
    ciType* type = obj->exact_type();
    if (type == nullptr) {
      type = obj->declared_type();
      type = comp->cha_exact_type(type);
    }
    assert(type == nullptr || type->is_klass(), "type should be class");
    exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;

    do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }

  if (!do_null && !do_update) {
    return result;
  }

  ciKlass* exact_signature_k = nullptr;
  if (do_update && signature_at_call_k != nullptr) {
    // Is the type from the signature exact (the only one possible)?
    exact_signature_k = signature_at_call_k->exact_klass();
    if (exact_signature_k == nullptr) {
      exact_signature_k = comp->cha_exact_type(signature_at_call_k);
    } else {
      result = exact_signature_k;
      // Known statically. No need to emit any code: prevent
      // LIR_Assembler::emit_profile_type() from emitting useless code
      profiled_k = ciTypeEntries::with_status(result, profiled_k);
    }
    // exact_klass and exact_signature_k can both be non-null but
    // different if exact_klass is loaded after the ciObject for
    // exact_signature_k is created.
    if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
      // sometimes the type of the signature is better than the best type
      // the compiler has
      exact_klass = exact_signature_k;
    }
    if (callee_signature_k != nullptr &&
        callee_signature_k != signature_at_call_k) {
      // ...
        assert(!src->is_illegal(), "check");
        BasicType t = src->type();
        if (is_reference_type(t)) {
          intptr_t profiled_k = parameters->type(j);
          Local* local = x->state()->local_at(java_index)->as_Local();
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
          // If the profile is known statically, set it once and for all and do not emit any code
          if (exact != nullptr) {
            md->set_parameter_type(j, exact);
          }
          j++;
        }
        java_index += type2size[t];
      }
    }
  }
}

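// Sets a profiling flag bit in the flags slot of a profile entry. The slot
// is loaded, the requested bit is OR'ed in (unconditionally, or via a
// conditional select when the caller supplied a condition), and the result
// is stored back.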
void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
  assert(md != nullptr && data != nullptr, "should have been initialized");
  LIR_Opr mdp = new_register(T_METADATA);
  __ metadata2reg(md->constant_encoding(), mdp);
  LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
  LIR_Opr flags = new_register(T_INT);
  __ move(addr, flags);
  if (condition != lir_cond_always) {
    LIR_Opr update = new_register(T_INT);
    __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
    // Merge the conditionally selected bit into the flags before the store;
    // otherwise the conditional path would leave the flags unchanged.
    __ logical_or(flags, update, flags);
  } else {
    __ logical_or(flags, LIR_OprFact::intConst(flag), flags);
  }
  __ store(flags, addr);
}

template <class ArrayData> void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ArrayData* load_store) {
  assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
  LIR_Opr tmp = new_register(T_METADATA);
  __ check_null_free_array(array.result(), tmp);

  profile_flags(md, load_store, ArrayStoreData::null_free_array_byte_constant(), lir_cond_equal);
}

template <class ArrayData> void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ArrayData*& load_store) {
  assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  profile_type(md, md->byte_offset_of_slot(load_store, ArrayData::array_offset()), 0,
               load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
}

void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadData* load_data) {
  assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
  assert(md != nullptr && load_data != nullptr, "should have been initialized");
  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  profile_type(md, md->byte_offset_of_slot(load_data, ArrayLoadData::element_offset()), 0,
               load_data->element()->type(), element, mdp, false, nullptr, nullptr);
}

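// do_Base emits the method's standard entry: the frame is set up, incoming
// arguments are copied from their calling-convention locations into virtual
// registers, the receiver is locked for synchronized methods, and invocation
// counters are incremented before control jumps to the first real block.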
void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type that was passed.
    switch (t) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      t = T_INT;
      break;
  // ...
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = syncLockOpr();
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-null so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
    }
  }
  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
    profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
    increment_invocation_counter(info);
  }
  if (method()->has_scalarized_args()) {
    // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
    // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
    CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
    __ append(new LIR_Op0(lir_check_orig_pc));
    __ branch(lir_cond_notEqual, deopt_stub);
  }

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}


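// On-stack replacement entry: the interpreter frame's state has been packed
// into an OSR buffer by the runtime; the only value produced here is the
// pointer to that buffer.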
void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}

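// Arguments destined for registers are force-loaded into their fixed
// locations; stack arguments are stored into the outgoing argument area
// (oops use move_wide, which tolerates the wider addressing forms needed
// on some platforms).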
void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
  if (loc->is_register()) {
    param->load_item_force(loc);
  } else {
    LIR_Address* addr = loc->as_address_ptr();
    param->load_for_store(addr->type());
    if (addr->type() == T_OBJECT) {
      __ move_wide(param->result(), addr);
    } else {
      __ move(param->result(), addr);
    }
  }
}

void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  assert(args->length() == arg_list->length(),
         "args=%d, arg_list=%d", args->length(), arg_list->length());
  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    invoke_load_one_argument(param, loc);
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}


// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
  // ...
  __ move(tmp, reg);
}



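// Lowers a conditional expression without control flow, using a compare
// followed by a conditional move. For example, Java code of the form
//   int r = (a < b) ? x : y;
// becomes, in essence:  cmp a, b;  cmove(lt, x, y, r).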
// Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
    right.dont_load_item();
  } else {
    // substitutability_check() needs to use right as a base register.
    right.load_item();
  }

  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();

  if (x->substitutability_check()) {
    substitutability_check(x, left, right, t_val, f_val);
  } else {
    LIR_Opr reg = rlock_result(x);
    __ cmp(lir_cond(x->cond()), left.result(), right.result());
    __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
  }
}

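// With Valhalla value objects, acmp (==) compares by state rather than by
// identity, so it cannot always be a plain pointer comparison. The helpers
// below emit the inline checks (klass loads and comparisons where the types
// are not statically known) and fall back to a SubstitutabilityCheckStub for
// the general field-by-field case.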
void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
  assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
  bool is_acmpeq = (x->cond() == If::eql);
  LIR_Opr equal_result = is_acmpeq ? t_val.result() : f_val.result();
  LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
  LIR_Opr result = rlock_result(x);
  CodeEmitInfo* info = state_for(x, x->state_before());

  substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
}

void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
  LIR_Opr equal_result = LIR_OprFact::intConst(1);
  LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
  LIR_Opr result = new_register(T_INT);
  CodeEmitInfo* info = state_for(x, x->state_before());

  substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);

  assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
  __ cmp(lir_cond(x->cond()), result, equal_result);
}

void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
                                                 LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
                                                 CodeEmitInfo* info) {
  LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
  LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
  LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;

  ciKlass* left_klass  = left_val ->as_loaded_klass_or_null();
  ciKlass* right_klass = right_val->as_loaded_klass_or_null();

  if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
    init_temps_for_substitutability_check(tmp1, tmp2);
  }

  if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same inline klass.
  } else {
    BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
    left_klass_op = new_register(t_klass);
    right_klass_op = new_register(t_klass);
  }

  CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
  __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
                            tmp1, tmp2,
                            left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
}

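// Emits a leaf call into the VM runtime for a no-argument intrinsic: no
// Java-visible state is needed, so only the call itself and a move of the
// return value into the intrinsic's result operand are generated.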
void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  // Enforce computation of _reserved_argument_area_size which is required on some platforms.
  BasicTypeList signature;
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime_leaf(routine, getThreadTemp(),
                       reg, new LIR_OprList());
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}



void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat :
  case vmIntrinsics::_doubleToRawLongBits :
  // ...
  if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }

  if (x->recv() != nullptr) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  if (data != nullptr) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = nullptr;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
    // to be used in load/store instructions on some platforms. So have
    // profile_type() compute the address of the profile in a register.
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
                                  ret->type(), x->ret(), mdp,
                                  !x->needs_null_check(),
                                  signature_at_call->return_type()->as_klass(),
                                  x->callee()->signature()->return_type()->as_klass());
    if (exact != nullptr) {
      md->set_return_type(bci, exact);
    }
  }
}

bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
  ciKlass* klass = value->as_loaded_klass_or_null();
  if (klass != nullptr) {
    if (klass->is_inlinetype()) {
      profile_flags(md, data, flag, lir_cond_always);
    } else if (klass->can_be_inline_klass()) {
      return false;
    }
  } else {
    return false;
  }
  return true;
}


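// Records, for both operands of an acmp, the observed type and whether the
// operand is an inline type. When the static type already answers the
// inline-type question, profile_inline_klass() above sets the flag directly
// and no per-execution profiling code needs to be emitted for that operand.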
void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
  ciMethod* method = x->method();
  assert(method != nullptr, "method should be set if branch is profiled");
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(x->bci());
  assert(data != nullptr, "must have profiling data");
  assert(data->is_ACmpData(), "need ACmpData to profile acmp");
  ciACmpData* acmp = (ciACmpData*)data;
  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
               acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
  int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
  if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
    LIR_Opr mdp = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), mdp);
    LIRItem value(x->left(), this);
    value.load_item();
    __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
  }
  profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
               in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
               acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
  if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
    LIR_Opr mdp = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), mdp);
    LIRItem value(x->right(), this);
    value.load_item();
    __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
  }
}

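// Profiles invocations of an inlined callee. The runtime is only notified
// when the inlinee's counter crosses a power-of-two boundary: with
// freq_log = n, the mask right_n_bits(n) keeps the low n bits, so the
// notification fires roughly once every 2^n invocations (e.g. n = 10 gives
// one notification per ~1024 calls).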
void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since C2 will inline them anyway;
  // accessors are also always mature.
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime very infrequently only to take care of counter overflows
    int freq_log = Tier23InlineeNotifyFreqLog;
    double scale;
    if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
      freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
    }
    increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
  }
}

void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
  if (compilation()->is_profiling()) {
#if defined(X86) && !defined(_LP64)
    // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
    LIR_Opr left_copy = new_register(left->type());