14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_Defs.hpp"
28 #include "c1/c1_FrameMap.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArrayKlass.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "ci/ciObjArray.hpp"
36 #include "ci/ciUtilities.hpp"
37 #include "compiler/compilerDefinitions.inline.hpp"
38 #include "compiler/compilerOracle.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/c1/barrierSetC1.hpp"
41 #include "oops/klass.inline.hpp"
42 #include "oops/methodCounters.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "runtime/vm_version.hpp"
46 #include "utilities/bitMap.inline.hpp"
47 #include "utilities/macros.hpp"
48 #include "utilities/powerOfTwo.hpp"
49
50 #ifdef ASSERT
51 #define __ gen()->lir(__FILE__, __LINE__)->
52 #else
53 #define __ gen()->lir()->
199 }
200
201
202 //--------------------------------------------------------------
203 // LIRItem
204
205 void LIRItem::set_result(LIR_Opr opr) {
206 assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
207 value()->set_operand(opr);
208
209 #ifdef ASSERT
210 if (opr->is_virtual()) {
211 _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
212 }
213 #endif
214
215 _result = opr;
216 }
217
218 void LIRItem::load_item() {
219 if (result()->is_illegal()) {
220 // update the item's result
221 _result = value()->operand();
222 }
223 if (!result()->is_register()) {
224 LIR_Opr reg = _gen->new_register(value()->type());
225 __ move(result(), reg);
226 if (result()->is_constant()) {
227 _result = reg;
228 } else {
229 set_result(reg);
230 }
231 }
232 }
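// A sketch of the usual LIRItem protocol (it mirrors do_NullCheck further
// down in this file):
//
//   LIRItem value(x->obj(), this);        // wrap the HIR value
//   value.load_item();                    // force it into a (virtual) register
//   __ null_check(value.result(), info);  // consume the loaded operand
//
// Note that constants keep their operand: load_item() moves them into a fresh
// register but records it only in _result, so set_result() is never asked to
// change a constant operand.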
233
234
235 void LIRItem::load_for_store(BasicType type) {
236 if (_gen->can_store_as_constant(value(), type)) {
237 _result = value()->operand();
238 if (!_result->is_constant()) {
606 assert(right_op != result_op, "malformed");
607 __ move(left_op, result_op);
608 left_op = result_op;
609 }
610
611 switch(code) {
612 case Bytecodes::_iand:
613 case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;
614
615 case Bytecodes::_ior:
616 case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;
617
618 case Bytecodes::_ixor:
619 case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;
620
621 default: ShouldNotReachHere();
622 }
623 }
624
625
626 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
627 if (!GenerateSynchronizationCode) return;
628 // for slow path, use debug info for state after successful locking
629 CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
630 __ load_stack_address_monitor(monitor_no, lock);
631 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
632 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
633 }
634
635
636 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
637 if (!GenerateSynchronizationCode) return;
638 // setup registers
639 LIR_Opr hdr = lock;
640 lock = new_hdr;
641 CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
642 __ load_stack_address_monitor(monitor_no, lock);
643 __ unlock_object(hdr, object, lock, scratch, slow_path);
644 }
645
646 #ifndef PRODUCT
647 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
648 if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
649 tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
650 } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
651 tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
652 }
653 }
654 #endif
655
656 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
657 klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
658 // If klass is not loaded we do not know if the klass has finalizers:
659 if (UseFastNewInstance && klass->is_loaded()
660 && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
661
662 C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
663
664 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
665
666 assert(klass->is_loaded(), "must be loaded");
667 // allocate space for instance
668 assert(klass->size_helper() > 0, "illegal instance size");
669 const int instance_size = align_object_size(klass->size_helper());
670 __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
671 oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
672 } else {
673 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
674 __ branch(lir_cond_always, slow_path);
675 __ branch_destination(slow_path->continuation());
676 }
677 }
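// A minimal, self-contained sketch of the rounding performed by
// align_object_size() above: sizes are in words and are rounded up to the
// object alignment, a power of two (the real value comes from MinObjAlignment).
static int align_object_size_sketch(int size_in_words, int align_in_words) {
  // round up to the next multiple of align_in_words (power of two assumed)
  return (size_in_words + align_in_words - 1) & ~(align_in_words - 1);
}
// e.g. align_object_size_sketch(3, 2) == 4, while an already-aligned 4 stays 4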
678
679
680 static bool is_constant_zero(Instruction* inst) {
681 IntConstant* c = inst->type()->as_IntConstant();
682 if (c) {
683 return (c->value() == 0);
684 }
685 return false;
686 }
687
688
689 static bool positive_constant(Instruction* inst) {
690 IntConstant* c = inst->type()->as_IntConstant();
691 if (c) {
692 return (c->value() >= 0);
693 }
694 return false;
754 if (src_type != nullptr) {
755 if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
756 is_exact = true;
757 expected_type = dst_type;
758 }
759 }
760 }
761 // at least pass along a good guess
762 if (expected_type == nullptr) expected_type = dst_exact_type;
763 if (expected_type == nullptr) expected_type = src_declared_type;
764 if (expected_type == nullptr) expected_type = dst_declared_type;
765
766 src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
767 dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
768 }
769
770 // if a probable array type has been identified, figure out if any
771 // of the required checks for a fast case can be elided.
772 int flags = LIR_OpArrayCopy::all_flags;
773
774 if (!src_objarray)
775 flags &= ~LIR_OpArrayCopy::src_objarray;
776 if (!dst_objarray)
777 flags &= ~LIR_OpArrayCopy::dst_objarray;
778
779 if (!x->arg_needs_null_check(0))
780 flags &= ~LIR_OpArrayCopy::src_null_check;
781 if (!x->arg_needs_null_check(2))
782 flags &= ~LIR_OpArrayCopy::dst_null_check;
783
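// The mask logic: start from LIR_OpArrayCopy::all_flags (every runtime check
// enabled) and clear the bit for each check the compiler has proven
// unnecessary; the bits that survive tell the back end which checks to emit.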
784
785 if (expected_type != nullptr) {
786 Value length_limit = nullptr;
787
788 IfOp* ifop = length->as_IfOp();
789 if (ifop != nullptr) {
790 // look for expressions like min(v, a.length) which end up as
791 // x > y ? y : x or x >= y ? y : x
792 if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
793 ifop->x() == ifop->fval() &&
1307 // Example: Foo.class.getModifiers()
1308 void LIRGenerator::do_getModifiers(Intrinsic* x) {
1309 assert(x->number_of_arguments() == 1, "wrong type");
1310
1311 LIRItem receiver(x->argument_at(0), this);
1312 receiver.load_item();
1313 LIR_Opr result = rlock_result(x);
1314
1315 CodeEmitInfo* info = nullptr;
1316 if (x->needs_null_check()) {
1317 info = state_for(x);
1318 }
1319
1320 // While reading off the universal constant mirror is less efficient than doing
1321 // another branch and returning the constant answer, this branchless code runs
1322 // much less risk of confusing the C1 register allocator. The choice of the
1323 // universe object here is correct as long as it yields the same modifiers we
1324 // would expect from the primitive class itself. See the spec for Class.getModifiers,
1325 // which gives the typed array klasses the same modifiers as their component types.
1326
1327 Klass* univ_klass = Universe::byteArrayKlass();
1328 assert(univ_klass->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
1329 LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass);
1330
1331 LIR_Opr recv_klass = new_register(T_METADATA);
1332 __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);
1333
1334 // Check if this is a Java mirror of primitive type, and select the appropriate klass.
1335 LIR_Opr klass = new_register(T_METADATA);
1336 __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(nullptr));
1337 __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);
1338
1339 // Get the answer.
1340 __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
1341 }
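// The cmp/cmove pair above is a branchless select; its scalar equivalent:
static const Klass* select_klass_sketch(const Klass* recv_klass,
                                        const Klass* prim_klass) {
  // the mirror's klass field is null exactly for primitive-type mirrors
  return (recv_klass == nullptr) ? prim_klass : recv_klass;
}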
1342
1343 void LIRGenerator::do_getObjectSize(Intrinsic* x) {
1344 assert(x->number_of_arguments() == 3, "wrong type");
1345 LIR_Opr result_reg = rlock_result(x);
1346
1347 LIRItem value(x->argument_at(2), this);
1348 value.load_item();
1349
1350 LIR_Opr klass = new_register(T_METADATA);
1351 load_klass(value.result(), klass, nullptr);
1352 LIR_Opr layout = new_register(T_INT);
1353 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1354
1355 LabelObj* L_done = new LabelObj();
1356 LabelObj* L_array = new LabelObj();
1357
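// Klass::layout_helper() is positive for instance klasses and negative for
// array klasses (0 is the neutral slow-path value), so '<= 0' routes arrays
// (and not-yet-known layouts) to L_array.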
1358 __ cmp(lir_cond_lessEqual, layout, 0);
1359 __ branch(lir_cond_lessEqual, L_array->label());
1360
1553 case T_FLOAT:
1554 if (c->as_jint_bits() != other->as_jint_bits()) continue;
1555 break;
1556 case T_LONG:
1557 case T_DOUBLE:
1558 if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1559 if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1560 break;
1561 case T_OBJECT:
1562 if (c->as_jobject() != other->as_jobject()) continue;
1563 break;
1564 default:
1565 break;
1566 }
1567 return _reg_for_constants.at(i);
1568 }
1569 }
1570
1571 LIR_Opr result = new_register(t);
1572 __ move((LIR_Opr)c, result);
1573 _constants.append(c);
1574 _reg_for_constants.append(result);
1575 return result;
1576 }
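// Note that the matching loop above compares constants bit-for-bit: for
// T_FLOAT, as_jint_bits() keeps +0.0f and -0.0f apart (they are different
// constants) while still letting identical NaN bit patterns share a register.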
1577
1578 //------------------------field access--------------------------------------
1579
1580 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1581 assert(x->number_of_arguments() == 4, "wrong type");
1582 LIRItem obj (x->argument_at(0), this); // object
1583 LIRItem offset(x->argument_at(1), this); // offset of field
1584 LIRItem cmp (x->argument_at(2), this); // value to compare with field
1585 LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
1586 assert(obj.type()->tag() == objectTag, "invalid type");
1587 assert(cmp.type()->tag() == type->tag(), "invalid type");
1588 assert(val.type()->tag() == type->tag(), "invalid type");
1589
1590 LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1591 obj, offset, cmp, val);
1592 set_result(x, result);
1593 }
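// Sketch of the contract this intrinsic compiles (the Unsafe.compareAndSet*
// family); the whole block below is performed atomically:
//   if (*field == cmp) { *field = val; return true; } else { return false; }
// The LIR_Opr produced by access_atomic_cmpxchg_at() holds that boolean.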
1594
1595 // Comment copied from templateTable_i486.cpp
1596 // ----------------------------------------------------------------------------
1597 // Volatile variables demand their effects be made known to all CPUs in
1670 (needs_patching ||
1671 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1672 // Emit an explicit null check because the offset is too large.
1673 // If the class is not loaded and the object is null, we need to deoptimize to throw a
1674 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1675 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1676 }
1677
1678 DecoratorSet decorators = IN_HEAP;
1679 if (is_volatile) {
1680 decorators |= MO_SEQ_CST;
1681 }
1682 if (needs_patching) {
1683 decorators |= C1_NEEDS_PATCHING;
1684 }
1685
1686 access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1687 value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1688 }
1689
1690 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1691 assert(x->is_pinned(),"");
1692 bool needs_range_check = x->compute_needs_range_check();
1693 bool use_length = x->length() != nullptr;
1694 bool obj_store = is_reference_type(x->elt_type());
1695 bool needs_store_check = obj_store && (x->value()->as_Constant() == nullptr ||
1696 !get_jobject_constant(x->value())->is_null_object() ||
1697 x->should_profile());
1698
1699 LIRItem array(x->array(), this);
1700 LIRItem index(x->index(), this);
1701 LIRItem value(x->value(), this);
1702 LIRItem length(this);
1703
1704 array.load_item();
1705 index.load_nonconstant();
1706
1707 if (use_length && needs_range_check) {
1708 length.set_instruction(x->length());
1709 length.load_item();
1710
1711 }
1712 if (needs_store_check || x->check_boolean()) {
1713 value.load_item();
1714 } else {
1715 value.load_for_store(x->elt_type());
1716 }
1717
1718 set_no_result(x);
1719
1720 // the CodeEmitInfo must be duplicated for each different
1721 // LIR-instruction because spilling can occur anywhere between two
1722 // instructions and so the debug information must be different
1723 CodeEmitInfo* range_check_info = state_for(x);
1724 CodeEmitInfo* null_check_info = nullptr;
1725 if (x->needs_null_check()) {
1726 null_check_info = new CodeEmitInfo(range_check_info);
1727 }
1728
1729 if (needs_range_check) {
1730 if (use_length) {
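// lir_cond_belowEqual is an unsigned compare: 'length <= index' (unsigned)
// is also true for any negative index, so one test covers both bounds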
1731 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1732 __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1733 } else {
1734 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1735 // range_check also does the null check
1736 null_check_info = nullptr;
1737 }
1738 }
1739
1740 if (GenerateArrayStoreCheck && needs_store_check) {
1741 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1742 array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1743 }
1744
1745 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1746 if (x->check_boolean()) {
1747 decorators |= C1_MASK_BOOLEAN;
1748 }
1749
1750 access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1751 nullptr, null_check_info);
1752 }
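// Self-contained sketch of the unsigned range-check trick used above:
static bool index_in_bounds_sketch(int index, int length) {
  // a negative index wraps to a huge unsigned value, so a single unsigned
  // compare catches both 'index < 0' and 'index >= length'
  return (unsigned int)index < (unsigned int)length;
}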
1753
1754 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1755 LIRItem& base, LIR_Opr offset, LIR_Opr result,
1756 CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1757 decorators |= ACCESS_READ;
1758 LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1759 if (access.is_raw()) {
1760 _barrier_set->BarrierSetC1::load_at(access, result);
1761 } else {
1762 _barrier_set->load_at(access, result);
1763 }
1764 }
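// Note the qualified call above: for raw accesses BarrierSetC1::load_at is
// bound statically, emitting a plain load with no GC barriers, while the
// unqualified call dispatches virtually to the GC-specific barrier set, which
// may wrap the load in pre/post barriers.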
1765
1766 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1767 LIR_Opr addr, LIR_Opr result) {
1768 decorators |= ACCESS_READ;
1769 LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1770 access.set_resolved_addr(addr);
1771 if (access.is_raw()) {
1869 __ move(LIR_OprFact::oopConst(nullptr), obj);
1870 }
1871 // Emit an explicit null check because the offset is too large.
1872 // If the class is not loaded and the object is null, we need to deoptimize to throw a
1873 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1874 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1875 }
1876
1877 DecoratorSet decorators = IN_HEAP;
1878 if (is_volatile) {
1879 decorators |= MO_SEQ_CST;
1880 }
1881 if (needs_patching) {
1882 decorators |= C1_NEEDS_PATCHING;
1883 }
1884
1885 LIR_Opr result = rlock_result(x, field_type);
1886 access_load_at(decorators, field_type,
1887 object, LIR_OprFact::intConst(x->offset()), result,
1888 info ? new CodeEmitInfo(info) : nullptr, info);
1889 }
1890
1891 // int/long jdk.internal.util.Preconditions.checkIndex
1892 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
1893 assert(x->number_of_arguments() == 3, "wrong type");
1894 LIRItem index(x->argument_at(0), this);
1895 LIRItem length(x->argument_at(1), this);
1896 LIRItem oobef(x->argument_at(2), this);
1897
1898 index.load_item();
1899 length.load_item();
1900 oobef.load_item();
1901
1902 LIR_Opr result = rlock_result(x);
1903 // x->state() is created by copy_state_for_exception and does not contain the
1904 // arguments; we must push them before entering the interpreter on deoptimization.
1905 ValueStack* state = x->state();
1906 for (int i = 0; i < x->number_of_arguments(); i++) {
1907 Value arg = x->argument_at(i);
1908 state->push(arg->type(), arg);
2013 __ move(LIR_OprFact::oopConst(nullptr), obj);
2014 __ null_check(obj, new CodeEmitInfo(null_check_info));
2015 }
2016 }
2017
2018 if (needs_range_check) {
2019 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2020 __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2021 } else if (use_length) {
2022 // TODO: use a (modified) version of array_range_check that does not require a
2023 // constant length to be loaded to a register
2024 __ cmp(lir_cond_belowEqual, length.result(), index.result());
2025 __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2026 } else {
2027 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2028 // The range check performs the null check, so clear it out for the load
2029 null_check_info = nullptr;
2030 }
2031 }
2032
2033 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2034
2035 LIR_Opr result = rlock_result(x, x->elt_type());
2036 access_load_at(decorators, x->elt_type(),
2037 array, index.result(), result,
2038 nullptr, null_check_info);
2039 }
2040
2041
2042 void LIRGenerator::do_NullCheck(NullCheck* x) {
2043 if (x->can_trap()) {
2044 LIRItem value(x->obj(), this);
2045 value.load_item();
2046 CodeEmitInfo* info = state_for(x);
2047 __ null_check(value.result(), info);
2048 }
2049 }
2050
2051
2052 void LIRGenerator::do_TypeCast(TypeCast* x) {
2053 LIRItem value(x->obj(), this);
2054 value.load_item();
2055 // the result is the same as from the node we are casting
2056 set_result(x, value.result());
2057 }
2058
2520 Compilation* comp = Compilation::current();
2521 if (do_update) {
2522 // try to find exact type, using CHA if possible, so that loading
2523 // the klass from the object can be avoided
2524 ciType* type = obj->exact_type();
2525 if (type == nullptr) {
2526 type = obj->declared_type();
2527 type = comp->cha_exact_type(type);
2528 }
2529 assert(type == nullptr || type->is_klass(), "type should be class");
2530 exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2531
2532 do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2533 }
2534
2535 if (!do_null && !do_update) {
2536 return result;
2537 }
2538
2539 ciKlass* exact_signature_k = nullptr;
2540 if (do_update) {
2541 // Is the type from the signature exact (the only one possible)?
2542 exact_signature_k = signature_at_call_k->exact_klass();
2543 if (exact_signature_k == nullptr) {
2544 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2545 } else {
2546 result = exact_signature_k;
2547 // Known statically. No need to emit any code: prevent
2548 // LIR_Assembler::emit_profile_type() from emitting useless code
2549 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2550 }
2551 // exact_klass and exact_signature_k can both be non-null but
2552 // different if exact_klass is loaded after the ciObject for
2553 // exact_signature_k is created.
2554 if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2555 // sometimes the type of the signature is better than the best type
2556 // the compiler has
2557 exact_klass = exact_signature_k;
2558 }
2559 if (callee_signature_k != nullptr &&
2560 callee_signature_k != signature_at_call_k) {
2605 assert(!src->is_illegal(), "check");
2606 BasicType t = src->type();
2607 if (is_reference_type(t)) {
2608 intptr_t profiled_k = parameters->type(j);
2609 Local* local = x->state()->local_at(java_index)->as_Local();
2610 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2611 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2612 profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2613 // If the profile is known statically, set it once and for all and do not emit any code
2614 if (exact != nullptr) {
2615 md->set_parameter_type(j, exact);
2616 }
2617 j++;
2618 }
2619 java_index += type2size[t];
2620 }
2621 }
2622 }
2623 }
2624
2625 void LIRGenerator::do_Base(Base* x) {
2626 __ std_entry(LIR_OprFact::illegalOpr);
2627 // Emit moves from physical registers / stack slots to virtual registers
2628 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2629 IRScope* irScope = compilation()->hir()->top_scope();
2630 int java_index = 0;
2631 for (int i = 0; i < args->length(); i++) {
2632 LIR_Opr src = args->at(i);
2633 assert(!src->is_illegal(), "check");
2634 BasicType t = src->type();
2635
2636 // Types which are smaller than int are passed as int, so
2637 // correct the type that was passed.
2638 switch (t) {
2639 case T_BYTE:
2640 case T_BOOLEAN:
2641 case T_SHORT:
2642 case T_CHAR:
2643 t = T_INT;
2644 break;
2687 }
2688 assert(obj->is_valid(), "must be valid");
2689
2690 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2691 LIR_Opr lock = syncLockOpr();
2692 __ load_stack_address_monitor(0, lock);
2693
2694 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
2695 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2696
2697 // receiver is guaranteed non-null, so we don't need a CodeEmitInfo
2698 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
2699 }
2700 }
2701 // increment invocation counters if needed
2702 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2703 profile_parameters(x);
2704 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
2705 increment_invocation_counter(info);
2706 }
2707
2708 // all blocks with a successor must end with an unconditional jump
2709 // to the successor even if they are consecutive
2710 __ jump(x->default_sux());
2711 }
2712
2713
2714 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2715 // construct our frame and model the production of the incoming pointer
2716 // to the OSR buffer.
2717 __ osr_entry(LIR_Assembler::osrBufferPointer());
2718 LIR_Opr result = rlock_result(x);
2719 __ move(LIR_Assembler::osrBufferPointer(), result);
2720 }
2721
2722
2723 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2724 assert(args->length() == arg_list->length(),
2725 "args=%d, arg_list=%d", args->length(), arg_list->length());
2726 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2727 LIRItem* param = args->at(i);
2728 LIR_Opr loc = arg_list->at(i);
2729 if (loc->is_register()) {
2730 param->load_item_force(loc);
2731 } else {
2732 LIR_Address* addr = loc->as_address_ptr();
2733 param->load_for_store(addr->type());
2734 if (addr->type() == T_OBJECT) {
2735 __ move_wide(param->result(), addr);
2736 } else
2737 __ move(param->result(), addr);
2738 }
2739 }
2740
2741 if (x->has_receiver()) {
2742 LIRItem* receiver = args->at(0);
2743 LIR_Opr loc = arg_list->at(0);
2744 if (loc->is_register()) {
2745 receiver->load_item_force(loc);
2746 } else {
2747 assert(loc->is_address(), "just checking");
2748 receiver->load_for_store(T_OBJECT);
2749 __ move_wide(receiver->result(), loc->as_address_ptr());
2750 }
2751 }
2752 }
2753
2754
2755 // Visits all arguments, returns appropriate items without loading them
2756 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2757 LIRItemList* argument_items = new LIRItemList();
2758 if (x->has_receiver()) {
2884 __ move(tmp, reg);
2885 }
2886
2887
2888
2889 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2890 void LIRGenerator::do_IfOp(IfOp* x) {
2891 #ifdef ASSERT
2892 {
2893 ValueTag xtag = x->x()->type()->tag();
2894 ValueTag ttag = x->tval()->type()->tag();
2895 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2896 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2897 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2898 }
2899 #endif
2900
2901 LIRItem left(x->x(), this);
2902 LIRItem right(x->y(), this);
2903 left.load_item();
2904 if (can_inline_as_constant(right.value())) {
2905 right.dont_load_item();
2906 } else {
2907 right.load_item();
2908 }
2909
2910 LIRItem t_val(x->tval(), this);
2911 LIRItem f_val(x->fval(), this);
2912 t_val.dont_load_item();
2913 f_val.dont_load_item();
2914 LIR_Opr reg = rlock_result(x);
2915
2916 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2917 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2918 }
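// do_IfOp lowers 'x <cond> y ? tval : fval' into one compare plus one
// conditional move -- the branchless equivalent of:
static int ifop_sketch(int x, int y, int tval, int fval) {
  return (x < y) ? tval : fval;  // cmp + cmove, no control flow
}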
2919
2920 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
2921 assert(x->number_of_arguments() == 0, "wrong type");
2922 // Enforce computation of _reserved_argument_area_size which is required on some platforms.
2923 BasicTypeList signature;
2924 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2925 LIR_Opr reg = result_register_for(x->type());
2926 __ call_runtime_leaf(routine, getThreadTemp(),
2927 reg, new LIR_OprList());
2928 LIR_Opr result = rlock_result(x);
2929 __ move(reg, result);
2930 }
2931
2932
2933
2934 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2935 switch (x->id()) {
2936 case vmIntrinsics::_intBitsToFloat :
2937 case vmIntrinsics::_doubleToRawLongBits :
3172 if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3173 profile_parameters_at_call(x);
3174 }
3175
3176 if (x->recv() != nullptr) {
3177 LIRItem value(x->recv(), this);
3178 value.load_item();
3179 recv = new_register(T_OBJECT);
3180 __ move(value.result(), recv);
3181 }
3182 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3183 }
3184
3185 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3186 int bci = x->bci_of_invoke();
3187 ciMethodData* md = x->method()->method_data_or_null();
3188 assert(md != nullptr, "Sanity");
3189 ciProfileData* data = md->bci_to_data(bci);
3190 if (data != nullptr) {
3191 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3192 ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3193 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3194
3195 bool ignored_will_link;
3196 ciSignature* signature_at_call = nullptr;
3197 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3198
3199 // The offset within the MDO of the entry to update may be too large
3200 // to be used in load/store instructions on some platforms. So have
3201 // profile_type() compute the address of the profile in a register.
3202 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3203 ret->type(), x->ret(), mdp,
3204 !x->needs_null_check(),
3205 signature_at_call->return_type()->as_klass(),
3206 x->callee()->signature()->return_type()->as_klass());
3207 if (exact != nullptr) {
3208 md->set_return_type(bci, exact);
3209 }
3210 }
3211 }
3212
3213 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3214 // We can safely ignore accessors here, since C2 will inline them anyway;
3215 // accessors are also always mature.
3216 if (!x->inlinee()->is_accessor()) {
3217 CodeEmitInfo* info = state_for(x, x->state(), true);
3218 // Notify the runtime only very infrequently, to take care of counter overflows
3219 int freq_log = Tier23InlineeNotifyFreqLog;
3220 double scale;
3221 if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3222 freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3223 }
3224 increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3225 }
3226 }
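// right_n_bits(freq_log) builds the mask (1 << freq_log) - 1, so the runtime
// is notified roughly once every 2^freq_log increments. A self-contained
// sketch of that frequency test:
static bool should_notify_sketch(int counter, int freq_log) {
  int mask = (1 << freq_log) - 1;  // same shape as right_n_bits(freq_log)
  return (counter & mask) == 0;    // true once per 2^freq_log increments
}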
3227
3228 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3229 if (compilation()->is_profiling()) {
3230 #if defined(X86) && !defined(_LP64)
3231 // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3232 LIR_Opr left_copy = new_register(left->type());
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_Defs.hpp"
28 #include "c1/c1_FrameMap.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArrayKlass.hpp"
34 #include "ci/ciFlatArrayKlass.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciInstance.hpp"
37 #include "ci/ciObjArray.hpp"
38 #include "ci/ciUtilities.hpp"
39 #include "compiler/compilerDefinitions.inline.hpp"
40 #include "compiler/compilerOracle.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/c1/barrierSetC1.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "oops/methodCounters.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "runtime/vm_version.hpp"
48 #include "utilities/bitMap.inline.hpp"
49 #include "utilities/macros.hpp"
50 #include "utilities/powerOfTwo.hpp"
51
52 #ifdef ASSERT
53 #define __ gen()->lir(__FILE__, __LINE__)->
54 #else
55 #define __ gen()->lir()->
201 }
202
203
204 //--------------------------------------------------------------
205 // LIRItem
206
207 void LIRItem::set_result(LIR_Opr opr) {
208 assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
209 value()->set_operand(opr);
210
211 #ifdef ASSERT
212 if (opr->is_virtual()) {
213 _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
214 }
215 #endif
216
217 _result = opr;
218 }
219
220 void LIRItem::load_item() {
221 assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
222
223 if (result()->is_illegal()) {
225 // update the item's result
225 _result = value()->operand();
226 }
227 if (!result()->is_register()) {
228 LIR_Opr reg = _gen->new_register(value()->type());
229 __ move(result(), reg);
230 if (result()->is_constant()) {
231 _result = reg;
232 } else {
233 set_result(reg);
234 }
235 }
236 }
237
238
239 void LIRItem::load_for_store(BasicType type) {
240 if (_gen->can_store_as_constant(value(), type)) {
241 _result = value()->operand();
242 if (!_result->is_constant()) {
610 assert(right_op != result_op, "malformed");
611 __ move(left_op, result_op);
612 left_op = result_op;
613 }
614
615 switch(code) {
616 case Bytecodes::_iand:
617 case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;
618
619 case Bytecodes::_ior:
620 case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;
621
622 case Bytecodes::_ixor:
623 case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;
624
625 default: ShouldNotReachHere();
626 }
627 }
628
629
630 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
631 CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_ie_stub) {
632 if (!GenerateSynchronizationCode) return;
633 // for slow path, use debug info for state after successful locking
634 CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_ie_stub, scratch);
635 __ load_stack_address_monitor(monitor_no, lock);
636 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
637 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_ie_stub);
638 }
639
640
641 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
642 if (!GenerateSynchronizationCode) return;
643 // setup registers
644 LIR_Opr hdr = lock;
645 lock = new_hdr;
646 CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
647 __ load_stack_address_monitor(monitor_no, lock);
648 __ unlock_object(hdr, object, lock, scratch, slow_path);
649 }
650
651 #ifndef PRODUCT
652 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
653 if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
654 tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
655 } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
656 tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
657 }
658 }
659 #endif
660
661 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
662 if (allow_inline) {
663 assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
664 __ metadata2reg(klass->constant_encoding(), klass_reg);
665 } else {
666 klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
667 }
668 // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
669 if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
670 && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
671
672 C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
673
674 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
675
676 assert(klass->is_loaded(), "must be loaded");
677 // allocate space for instance
678 assert(klass->size_helper() > 0, "illegal instance size");
679 const int instance_size = align_object_size(klass->size_helper());
680 __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
681 oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
682 } else {
683 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
684 __ jump(slow_path);
685 __ branch_destination(slow_path->continuation());
686 }
687 }
688
689
690 static bool is_constant_zero(Instruction* inst) {
691 IntConstant* c = inst->type()->as_IntConstant();
692 if (c) {
693 return (c->value() == 0);
694 }
695 return false;
696 }
697
698
699 static bool positive_constant(Instruction* inst) {
700 IntConstant* c = inst->type()->as_IntConstant();
701 if (c) {
702 return (c->value() >= 0);
703 }
704 return false;
764 if (src_type != nullptr) {
765 if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
766 is_exact = true;
767 expected_type = dst_type;
768 }
769 }
770 }
771 // at least pass along a good guess
772 if (expected_type == nullptr) expected_type = dst_exact_type;
773 if (expected_type == nullptr) expected_type = src_declared_type;
774 if (expected_type == nullptr) expected_type = dst_declared_type;
775
776 src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
777 dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
778 }
779
780 // if a probable array type has been identified, figure out if any
781 // of the required checks for a fast case can be elided.
782 int flags = LIR_OpArrayCopy::all_flags;
783
784 if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
785 flags &= ~LIR_OpArrayCopy::always_slow_path;
786 }
787 if (!src->maybe_flat_array()) {
788 flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
789 }
790 if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
791 flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
792 }
793
794 if (!src_objarray)
795 flags &= ~LIR_OpArrayCopy::src_objarray;
796 if (!dst_objarray)
797 flags &= ~LIR_OpArrayCopy::dst_objarray;
798
799 if (!x->arg_needs_null_check(0))
800 flags &= ~LIR_OpArrayCopy::src_null_check;
801 if (!x->arg_needs_null_check(2))
802 flags &= ~LIR_OpArrayCopy::dst_null_check;
803
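// As above: start from LIR_OpArrayCopy::all_flags (every runtime check
// enabled, including the Valhalla flat/null-free array checks) and clear the
// bit for each check the compiler has proven unnecessary; the surviving bits
// tell the back end which checks to emit.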
804
805 if (expected_type != nullptr) {
806 Value length_limit = nullptr;
807
808 IfOp* ifop = length->as_IfOp();
809 if (ifop != nullptr) {
810 // look for expressions like min(v, a.length) which end up as
811 // x > y ? y : x or x >= y ? y : x
812 if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
813 ifop->x() == ifop->fval() &&
1327 // Example: Foo.class.getModifiers()
1328 void LIRGenerator::do_getModifiers(Intrinsic* x) {
1329 assert(x->number_of_arguments() == 1, "wrong type");
1330
1331 LIRItem receiver(x->argument_at(0), this);
1332 receiver.load_item();
1333 LIR_Opr result = rlock_result(x);
1334
1335 CodeEmitInfo* info = nullptr;
1336 if (x->needs_null_check()) {
1337 info = state_for(x);
1338 }
1339
1340 // While reading off the universal constant mirror is less efficient than doing
1341 // another branch and returning the constant answer, this branchless code runs
1342 // much less risk of confusing the C1 register allocator. The choice of the
1343 // universe object here is correct as long as it yields the same modifiers we
1344 // would expect from the primitive class itself. See the spec for Class.getModifiers,
1345 // which gives the typed array klasses the same modifiers as their component types.
1346
1347 // Valhalla update: the code is now a bit convoluted because arrays and primitive
1348 // classes no longer have the same modifier set, but we cannot introduce
1349 // branches in LIR generation (JDK-8211231). So the first part of the code remains
1350 // identical, using the byteArrayKlass object to avoid an NPE when accessing the
1351 // modifiers. The code then also prepares the correct modifier set for
1352 // primitive classes, and a second conditional move puts the right
1353 // value into result.
1354
1355
1356 Klass* univ_klass = Universe::byteArrayKlass();
1357 assert(univ_klass->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC
1358 | (Arguments::enable_preview() ? JVM_ACC_IDENTITY : 0)), "Sanity");
1359 LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass);
1360
1361 LIR_Opr recv_klass = new_register(T_METADATA);
1362 __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);
1363
1364 // Check if this is a Java mirror of primitive type, and select the appropriate klass.
1365 LIR_Opr klass = new_register(T_METADATA);
1366 __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(nullptr));
1367 __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);
1368 LIR_Opr klass_modifiers = new_register(T_INT);
1369 __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), klass_modifiers);
1370
1371 LIR_Opr prim_modifiers = load_immediate(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC, T_INT);
1372
1373 __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
1374 __ cmove(lir_cond_equal, prim_modifiers, klass_modifiers, result, T_INT);
1375
1376 }
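// Net effect of the two compare/cmove pairs above, as a scalar sketch:
static jint modifiers_sketch(const Klass* recv_klass, jint klass_modifiers,
                             jint prim_modifiers) {
  // a null klass in the mirror means "primitive type"
  return (recv_klass == nullptr) ? prim_modifiers : klass_modifiers;
}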
1377
1378 void LIRGenerator::do_getObjectSize(Intrinsic* x) {
1379 assert(x->number_of_arguments() == 3, "wrong type");
1380 LIR_Opr result_reg = rlock_result(x);
1381
1382 LIRItem value(x->argument_at(2), this);
1383 value.load_item();
1384
1385 LIR_Opr klass = new_register(T_METADATA);
1386 load_klass(value.result(), klass, nullptr);
1387 LIR_Opr layout = new_register(T_INT);
1388 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1389
1390 LabelObj* L_done = new LabelObj();
1391 LabelObj* L_array = new LabelObj();
1392
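// Klass::layout_helper() is positive for instance klasses and negative for
// array klasses (0 is the neutral slow-path value), so '<= 0' routes arrays
// (and not-yet-known layouts) to L_array.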
1393 __ cmp(lir_cond_lessEqual, layout, 0);
1394 __ branch(lir_cond_lessEqual, L_array->label());
1395
1588 case T_FLOAT:
1589 if (c->as_jint_bits() != other->as_jint_bits()) continue;
1590 break;
1591 case T_LONG:
1592 case T_DOUBLE:
1593 if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1594 if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1595 break;
1596 case T_OBJECT:
1597 if (c->as_jobject() != other->as_jobject()) continue;
1598 break;
1599 default:
1600 break;
1601 }
1602 return _reg_for_constants.at(i);
1603 }
1604 }
1605
1606 LIR_Opr result = new_register(t);
1607 __ move((LIR_Opr)c, result);
1608 if (!in_conditional_code()) {
1609 _constants.append(c);
1610 _reg_for_constants.append(result);
1611 }
1612 return result;
1613 }
1614
1615 void LIRGenerator::set_in_conditional_code(bool v) {
1616 assert(v != _in_conditional_code, "must change state");
1617 _in_conditional_code = v;
1618 }
1619
1620
1621 //------------------------field access--------------------------------------
1622
1623 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1624 assert(x->number_of_arguments() == 4, "wrong type");
1625 LIRItem obj (x->argument_at(0), this); // object
1626 LIRItem offset(x->argument_at(1), this); // offset of field
1627 LIRItem cmp (x->argument_at(2), this); // value to compare with field
1628 LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
1629 assert(obj.type()->tag() == objectTag, "invalid type");
1630 assert(cmp.type()->tag() == type->tag(), "invalid type");
1631 assert(val.type()->tag() == type->tag(), "invalid type");
1632
1633 LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1634 obj, offset, cmp, val);
1635 set_result(x, result);
1636 }
1637
1638 // Comment copied from templateTable_i486.cpp
1639 // ----------------------------------------------------------------------------
1640 // Volatile variables demand their effects be made known to all CPUs in
1713 (needs_patching ||
1714 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1715 // Emit an explicit null check because the offset is too large.
1716 // If the class is not loaded and the object is null, we need to deoptimize to throw a
1717 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1718 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1719 }
1720
1721 DecoratorSet decorators = IN_HEAP;
1722 if (is_volatile) {
1723 decorators |= MO_SEQ_CST;
1724 }
1725 if (needs_patching) {
1726 decorators |= C1_NEEDS_PATCHING;
1727 }
1728
1729 access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1730 value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1731 }
1732
1733 // FIXME -- I can't find any other way to pass an address to access_load_at().
1734 class TempResolvedAddress: public Instruction {
1735 public:
1736 TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1737 set_operand(addr);
1738 }
1739 virtual void input_values_do(ValueVisitor*) {}
1740 virtual void visit(InstructionVisitor* v) {}
1741 virtual const char* name() const { return "TempResolvedAddress"; }
1742 };
1743
1744 LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
1745 ciType* array_type = array.value()->declared_type();
1746 ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
1747 assert(flat_array_klass->is_loaded(), "must be");
1748
1749 int array_header_size = flat_array_klass->array_header_in_bytes();
1750 int shift = flat_array_klass->log2_element_size();
1751
1752 #ifndef _LP64
1753 LIR_Opr index_op = new_register(T_INT);
1754 // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
1755 // the top (shift+1) bits of index_op are zero and, if they are not,
1756 // throw ArrayIndexOutOfBoundsException
1757 if (index.result()->is_constant()) {
1758 jint const_index = index.result()->as_jint();
1759 __ move(LIR_OprFact::intConst(const_index << shift), index_op);
1760 } else {
1761 __ shift_left(index_op, shift, index.result());
1762 }
1763 #else
1764 LIR_Opr index_op = new_register(T_LONG);
1765 if (index.result()->is_constant()) {
1766 jint const_index = index.result()->as_jint();
1767 __ move(LIR_OprFact::longConst(const_index << shift), index_op);
1768 } else {
1769 __ convert(Bytecodes::_i2l, index.result(), index_op);
1770 // Need to shift manually, as LIR_Address can scale only up to 3.
1771 __ shift_left(index_op, shift, index_op);
1772 }
1773 #endif
1774
1775 LIR_Opr elm_op = new_pointer_register();
1776 LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
1777 __ leal(LIR_OprFact::address(elm_address), elm_op);
1778 return elm_op;
1779 }
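// Flat-array addressing in one line: element address = array base +
// array_header_in_bytes() + (index << log2_element_size()). A self-contained
// sketch with illustrative numbers (real values come from the ciFlatArrayKlass):
static jlong flat_element_offset_sketch(jlong header_bytes, jlong index, int shift) {
  return header_bytes + (index << shift);
}
// e.g. a 16-byte header with 8-byte elements: index 2 -> 16 + (2 << 3) == 32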
1780
1781 void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, int sub_offset) {
1782 assert(field != nullptr, "Need a subelement type specified");
1783
1784 // Find the starting address of the source (inside the array)
1785 LIR_Opr elm_op = get_and_load_element_address(array, index);
1786
1787 BasicType subelt_type = field->type()->basic_type();
1788 TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
1789 LIRItem elm_item(elm_resolved_addr, this);
1790
1791 DecoratorSet decorators = IN_HEAP;
1792 access_load_at(decorators, subelt_type,
1793 elm_item, LIR_OprFact::intConst(sub_offset), result,
1794 nullptr, nullptr);
1795
1796 if (field->is_null_free()) {
1797 assert(field->type()->is_loaded(), "Must be");
1798 assert(field->type()->is_inlinetype(), "Must be if loaded");
1799 assert(field->type()->as_inline_klass()->is_initialized(), "Must be");
1800 LabelObj* L_end = new LabelObj();
1801 __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
1802 __ branch(lir_cond_notEqual, L_end->label());
1803 set_in_conditional_code(true);
1804 Constant* default_value = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
1805 if (default_value->is_pinned()) {
1806 __ move(LIR_OprFact::value_type(default_value->type()), result);
1807 } else {
1808 __ move(load_constant(default_value), result);
1809 }
1810 __ branch_destination(L_end->label());
1811 set_in_conditional_code(false);
1812 }
1813 }
1814
1815 void LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
1816 ciField* field, int sub_offset) {
1817 assert(sub_offset == 0 || field != nullptr, "Sanity check");
1818
1819 // Find the starting address of the source (inside the array)
1820 LIR_Opr elm_op = get_and_load_element_address(array, index);
1821
1822 ciInlineKlass* elem_klass = nullptr;
1823 if (field != nullptr) {
1824 elem_klass = field->type()->as_inline_klass();
1825 } else {
1826 elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
1827 }
1828 for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1829 ciField* inner_field = elem_klass->nonstatic_field_at(i);
1830 assert(!inner_field->is_flat(), "flat fields must have been expanded");
1831 int obj_offset = inner_field->offset_in_bytes();
1832 int elm_offset = obj_offset - elem_klass->first_field_offset() + sub_offset; // object header is not stored in array.
1833 BasicType field_type = inner_field->type()->basic_type();
1834
1835 // Types which are smaller than int are still passed in an int register.
1836 BasicType reg_type = field_type;
1837 switch (reg_type) {
1838 case T_BYTE:
1839 case T_BOOLEAN:
1840 case T_SHORT:
1841 case T_CHAR:
1842 reg_type = T_INT;
1843 break;
1844 default:
1845 break;
1846 }
1847
1848 LIR_Opr temp = new_register(reg_type);
1849 TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1850 LIRItem elm_item(elm_resolved_addr, this);
1851
1852 DecoratorSet decorators = IN_HEAP;
1853 if (is_load) {
1854 access_load_at(decorators, field_type,
1855 elm_item, LIR_OprFact::intConst(elm_offset), temp,
1856 nullptr, nullptr);
1857 access_store_at(decorators, field_type,
1858 obj_item, LIR_OprFact::intConst(obj_offset), temp,
1859 nullptr, nullptr);
1860 } else {
1861 access_load_at(decorators, field_type,
1862 obj_item, LIR_OprFact::intConst(obj_offset), temp,
1863 nullptr, nullptr);
1864 access_store_at(decorators, field_type,
1865 elm_item, LIR_OprFact::intConst(elm_offset), temp,
1866 nullptr, nullptr);
1867 }
1868 }
1869 }
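// access_flat_array() above lowers a flat element move into one load/store
// pair per scalar field. A plain C++ illustration (not VM code; the struct is
// a stand-in for a two-field inline type):
struct FlatPointSketch { int x; int y; };
static void store_flat_element_sketch(FlatPointSketch* arr, int i,
                                      const FlatPointSketch* v) {
  arr[i].x = v->x;  // one access_load_at/access_store_at pair per field
  arr[i].y = v->y;  // in-array offsets are rebased by first_field_offset()
}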
1870
1871 void LIRGenerator::check_flat_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
1872 LIR_Opr tmp = new_register(T_METADATA);
1873 __ check_flat_array(array, value, tmp, slow_path);
1874 }
1875
1876 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
1877 LabelObj* L_end = new LabelObj();
1878 LIR_Opr tmp = new_register(T_METADATA);
1879 __ check_null_free_array(array.result(), tmp);
1880 __ branch(lir_cond_equal, L_end->label());
1881 __ null_check(value.result(), info);
1882 __ branch_destination(L_end->label());
1883 }
1884
1885 bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
1886 if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
1887 ciType* type = x->value()->declared_type();
1888 if (type != nullptr && type->is_klass()) {
1889 ciKlass* klass = type->as_klass();
1890 if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->flat_in_array())) {
1891 // This is known to be a non-flat object. If the array is a flat array,
1892 // it will be caught by the code generated by array_store_check().
1893 return false;
1894 }
1895 }
1896 // We're not 100% sure, so let's do the flat_array_store_check.
1897 return true;
1898 }
1899 return false;
1900 }
1901
1902 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
1903 return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
1904 }
1905
1906 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1907 assert(x->is_pinned(),"");
1908 assert(x->elt_type() != T_ARRAY, "never used");
1909 bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
1910 bool needs_range_check = x->compute_needs_range_check();
1911 bool use_length = x->length() != nullptr;
1912 bool obj_store = is_reference_type(x->elt_type());
1913 bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
1914 (x->value()->as_Constant() == nullptr ||
1915 !get_jobject_constant(x->value())->is_null_object());
1916
1917 LIRItem array(x->array(), this);
1918 LIRItem index(x->index(), this);
1919 LIRItem value(x->value(), this);
1920 LIRItem length(this);
1921
1922 array.load_item();
1923 index.load_nonconstant();
1924
1925 if (use_length && needs_range_check) {
1926 length.set_instruction(x->length());
1927 length.load_item();
1928 }
1929
1930 if (needs_store_check || x->check_boolean()
1931 || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
1932 value.load_item();
1933 } else {
1934 value.load_for_store(x->elt_type());
1935 }
1936
1937 set_no_result(x);
1938
1939 // the CodeEmitInfo must be duplicated for each different
1940 // LIR-instruction because spilling can occur anywhere between two
1941 // instructions and so the debug information must be different
1942 CodeEmitInfo* range_check_info = state_for(x);
1943 CodeEmitInfo* null_check_info = nullptr;
1944 if (x->needs_null_check()) {
1945 null_check_info = new CodeEmitInfo(range_check_info);
1946 }
1947
1948 if (needs_range_check) {
1949 if (use_length) {
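// lir_cond_belowEqual is an unsigned compare: 'length <= index' (unsigned)
// is also true for any negative index, so one test covers both bounds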
1950 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1951 __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1952 } else {
1953 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1954 // range_check also does the null check
1955 null_check_info = nullptr;
1956 }
1957 }
1958
1959 if (x->should_profile()) {
1960 if (x->array()->is_loaded_flat_array()) {
1961 // No need to profile a store to a flat array of known type. This can happen if
1962 // the type only became known after optimizations (for example, after the PhiSimplifier).
1963 x->set_should_profile(false);
1964 } else {
1965 int bci = x->profiled_bci();
1966 ciMethodData* md = x->profiled_method()->method_data();
1967 assert(md != nullptr, "Sanity");
1968 ciProfileData* data = md->bci_to_data(bci);
1969 assert(data != nullptr && data->is_ArrayStoreData(), "incorrect profiling entry");
1970 ciArrayStoreData* store_data = (ciArrayStoreData*)data;
1971 profile_array_type(x, md, store_data);
1972 assert(store_data->is_ArrayStoreData(), "incorrect profiling entry");
1973 if (x->array()->maybe_null_free_array()) {
1974 profile_null_free_array(array, md, store_data);
1975 }
1976 }
1977 }
1978
1979 if (GenerateArrayStoreCheck && needs_store_check) {
1980 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1981 array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1982 }
1983
1984 if (is_loaded_flat_array) {
1985 if (!x->value()->is_null_free()) {
1986 __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1987 }
1988 // If the array element is an empty inline type, there is no need to copy anything
1989 if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
1990 access_flat_array(false, array, index, value);
1991 }
1992 } else {
1993 StoreFlattenedArrayStub* slow_path = nullptr;
1994
1995 if (needs_flat_array_store_check(x)) {
1996 // Check if we indeed have a flat array
1997 index.load_item();
1998 slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
1999 check_flat_array(array.result(), value.result(), slow_path);
2000 set_in_conditional_code(true);
2001 } else if (needs_null_free_array_store_check(x)) {
2002 CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
2003 check_null_free_array(array, value, info);
2004 }
2005
2006 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2007 if (x->check_boolean()) {
2008 decorators |= C1_MASK_BOOLEAN;
2009 }
2010
2011 access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
2012 nullptr, null_check_info);
2013 if (slow_path != nullptr) {
2014 __ branch_destination(slow_path->continuation());
2015 set_in_conditional_code(false);
2016 }
2017 }
2018 }
2019
2020 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
2021 LIRItem& base, LIR_Opr offset, LIR_Opr result,
2022 CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
2023 decorators |= ACCESS_READ;
2024 LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
2025 if (access.is_raw()) {
2026 _barrier_set->BarrierSetC1::load_at(access, result);
2027 } else {
2028 _barrier_set->load_at(access, result);
2029 }
2030 }
2031
2032 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
2033 LIR_Opr addr, LIR_Opr result) {
2034 decorators |= ACCESS_READ;
2035 LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
2036 access.set_resolved_addr(addr);
2037 if (access.is_raw()) {
      __ move(LIR_OprFact::oopConst(nullptr), obj);
    }
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is null, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

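  // Select the access decorators: volatile fields are accessed with MO_SEQ_CST
  // ordering, and accesses that cannot be fully resolved at compile time are
  // marked C1_NEEDS_PATCHING so the access is fixed up once the class is loaded.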
  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  LIR_Opr result = rlock_result(x, field_type);
  access_load_at(decorators, field_type,
                 object, LIR_OprFact::intConst(x->offset()), result,
                 info ? new CodeEmitInfo(info) : nullptr, info);

  ciField* field = x->field();
  if (field->is_null_free()) {
    // A load from a non-flat inline type field requires a null check
    // to replace null with the default value.
    ciInstanceKlass* holder = field->holder();
    if (field->is_static() && holder->is_loaded()) {
      ciObject* val = holder->java_mirror()->field_value(field).as_object();
      if (!val->is_null_object()) {
        // The static field is initialized; no null check is needed.
        return;
      }
    }
    ciInlineKlass* inline_klass = field->type()->as_inline_klass();
    if (inline_klass->is_initialized()) {
      LabelObj* L_end = new LabelObj();
      __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
      __ branch(lir_cond_notEqual, L_end->label());
      set_in_conditional_code(true);
      Constant* default_value = new Constant(new InstanceConstant(inline_klass->default_instance()));
      if (default_value->is_pinned()) {
        __ move(LIR_OprFact::value_type(default_value->type()), result);
      } else {
        __ move(load_constant(default_value), result);
      }
      __ branch_destination(L_end->label());
      set_in_conditional_code(false);
    } else {
      info = state_for(x, x->state_before());
      __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(nullptr));
      __ branch(lir_cond_equal, new DeoptimizeStub(info, Deoptimization::Reason_uninitialized,
                                                   Deoptimization::Action_make_not_entrant));
    }
  }
}

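// Illustrative caller shape (an assumption for orientation, not emitted code):
//   java.util.Objects.checkIndex(index, length) delegates to
//   jdk.internal.util.Preconditions.checkIndex(index, length, oobef),
//   where oobef is the out-of-bounds exception formatter passed as argument 2.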
// int/long jdk.internal.util.Preconditions.checkIndex
void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIRItem index(x->argument_at(0), this);
  LIRItem length(x->argument_at(1), this);
  LIRItem oobef(x->argument_at(2), this);

  index.load_item();
  length.load_item();
  oobef.load_item();

  LIR_Opr result = rlock_result(x);
  // x->state() is created from copy_state_for_exception and does not contain the arguments;
  // we must push them before entering the interpreter on deoptimization.
  ValueStack* state = x->state();
  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value arg = x->argument_at(i);
    state->push(arg->type(), arg);
    // ...
      __ move(LIR_OprFact::oopConst(nullptr), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  if (needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = nullptr;
    }
  }

  ciMethodData* md = nullptr;
  ciArrayLoadData* load_data = nullptr;
  if (x->should_profile()) {
    if (x->array()->is_loaded_flat_array()) {
      // No need to profile a load from a flat array of known type. This can happen if
      // the type only became known after optimizations (for example, after the PhiSimplifier).
      x->set_should_profile(false);
    } else {
      int bci = x->profiled_bci();
      md = x->profiled_method()->method_data();
      assert(md != nullptr, "Sanity");
      ciProfileData* data = md->bci_to_data(bci);
      assert(data != nullptr && data->is_ArrayLoadData(), "incorrect profiling entry");
      load_data = (ciArrayLoadData*)data;
      profile_array_type(x, md, load_data);
    }
  }

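  // The load itself splits into four cases: (1) a flat-array load into a buffered
  // inline type instance (x->vt() != nullptr), (2) a delayed load of a single
  // sub-field of a flat element, (3) an initialized, empty inline type element,
  // where the shared default instance is materialized instead of reading memory,
  // and (4) the general case, with a runtime flat-array check and slow-path stub
  // when the array may be flat.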
  Value element = nullptr; // only set (and used for profiling) in the general case below
  if (x->vt() != nullptr) {
    assert(x->array()->is_loaded_flat_array(), "must be");
    // Find the destination address (of the NewInlineTypeInstance).
    LIRItem obj_item(x->vt(), this);

    access_flat_array(true, array, index, obj_item,
                      x->delayed() == nullptr ? nullptr : x->delayed()->field(),
                      x->delayed() == nullptr ? 0 : x->delayed()->offset());
    set_no_result(x);
  } else if (x->delayed() != nullptr) {
    assert(x->array()->is_loaded_flat_array(), "must be");
    LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
    access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
  } else if (x->array() != nullptr && x->array()->is_loaded_flat_array() &&
             x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_initialized() &&
             x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
    // Load the default instance instead of reading the element
    ciInlineKlass* elem_klass = x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
    LIR_Opr result = rlock_result(x, x->elt_type());
    assert(elem_klass->is_initialized(), "Must be");
    Constant* default_value = new Constant(new InstanceConstant(elem_klass->default_instance()));
    if (default_value->is_pinned()) {
      __ move(LIR_OprFact::value_type(default_value->type()), result);
    } else {
      __ move(load_constant(default_value), result);
    }
  } else {
    LIR_Opr result = rlock_result(x, x->elt_type());
    LoadFlattenedArrayStub* slow_path = nullptr;

    if (x->should_profile() && x->array()->maybe_null_free_array()) {
      profile_null_free_array(array, md, load_data);
    }

    if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
      assert(x->delayed() == nullptr, "delayed LoadIndexed only applies to loaded flat arrays");
      index.load_item();
      // If we are loading from a flat array, load the element via a runtime call
      slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
      check_flat_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
      set_in_conditional_code(true);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    access_load_at(decorators, x->elt_type(),
                   array, index.result(), result,
                   nullptr, null_check_info);

    if (slow_path != nullptr) {
      __ branch_destination(slow_path->continuation());
      set_in_conditional_code(false);
    }

    element = x;
  }

  if (x->should_profile()) {
    profile_element_type(element, md, load_data);
  }
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting
  set_result(x, value.result());
}

// ...
  Compilation* comp = Compilation::current();
  if (do_update) {
    // try to find exact type, using CHA if possible, so that loading
    // the klass from the object can be avoided
    ciType* type = obj->exact_type();
    if (type == nullptr) {
      type = obj->declared_type();
      type = comp->cha_exact_type(type);
    }
    assert(type == nullptr || type->is_klass(), "type should be class");
    exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;

    do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }

  if (!do_null && !do_update) {
    return result;
  }

  ciKlass* exact_signature_k = nullptr;
  if (do_update && signature_at_call_k != nullptr) {
    // Is the type from the signature exact (the only one possible)?
    exact_signature_k = signature_at_call_k->exact_klass();
    if (exact_signature_k == nullptr) {
      exact_signature_k = comp->cha_exact_type(signature_at_call_k);
    } else {
      result = exact_signature_k;
      // Known statically. No need to emit any code: prevent
      // LIR_Assembler::emit_profile_type() from emitting useless code
      profiled_k = ciTypeEntries::with_status(result, profiled_k);
    }
    // exact_klass and exact_signature_k can both be non-null but
    // different if exact_klass is loaded after the ciObject for
    // exact_signature_k is created.
    if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
      // Sometimes the type from the signature is better than the best type
      // the compiler has
      exact_klass = exact_signature_k;
    }
    if (callee_signature_k != nullptr &&
        callee_signature_k != signature_at_call_k) {
      // ...
        assert(!src->is_illegal(), "check");
        BasicType t = src->type();
        if (is_reference_type(t)) {
          intptr_t profiled_k = parameters->type(j);
          Local* local = x->state()->local_at(java_index)->as_Local();
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
          // If the profile is known statically, set it once and for all and do not emit any code
          if (exact != nullptr) {
            md->set_parameter_type(j, exact);
          }
          j++;
        }
        java_index += type2size[t];
      }
    }
  }
}

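// Sets the given flag bit in the profile entry's DataLayout flags byte. With
// lir_cond_always the bit is OR'ed in unconditionally; otherwise a cmove
// selects between the flag and zero based on the condition codes set by the
// preceding compare, and the selected value is merged into the flags before
// the write-back.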
void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
  assert(md != nullptr && data != nullptr, "should have been initialized");
  LIR_Opr mdp = new_register(T_METADATA);
  __ metadata2reg(md->constant_encoding(), mdp);
  LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
  LIR_Opr flags = new_register(T_INT);
  __ move(addr, flags);
  if (condition != lir_cond_always) {
    LIR_Opr update = new_register(T_INT);
    __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
    // Merge the conditionally selected value into the flags before storing them back
    __ logical_or(flags, update, flags);
  } else {
    __ logical_or(flags, LIR_OprFact::intConst(flag), flags);
  }
  __ store(flags, addr);
}

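// Records in the MDO whether the array seen at this access was null-free:
// check_null_free_array sets the condition codes that profile_flags'
// conditional path then consumes.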
template <class ArrayData> void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ArrayData* load_store) {
  assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
  LIR_Opr tmp = new_register(T_METADATA);
  __ check_null_free_array(array.result(), tmp);

  profile_flags(md, load_store, ArrayStoreData::null_free_array_byte_constant(), lir_cond_equal);
}

template <class ArrayData> void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ArrayData*& load_store) {
  assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  profile_type(md, md->byte_offset_of_slot(load_store, ArrayData::array_offset()), 0,
               load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
}

void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadData* load_data) {
  assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
  assert(md != nullptr && load_data != nullptr, "should have been initialized");
  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  profile_type(md, md->byte_offset_of_slot(load_data, ArrayLoadData::element_offset()), 0,
               load_data->element()->type(), element, mdp, false, nullptr, nullptr);
}

void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types smaller than int are passed as int, so correct the type that was passed.
    switch (t) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      t = T_INT;
      break;
      // ...
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = syncLockOpr();
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // The receiver is guaranteed non-null, so we don't need a CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
    }
  }
  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
    profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
    increment_invocation_counter(info);
  }
  if (method()->has_scalarized_args()) {
    // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
    // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
    CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
    __ append(new LIR_Op0(lir_check_orig_pc));
    __ branch(lir_cond_notEqual, deopt_stub);
  }

  // All blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}


void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}

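// Loads one outgoing argument to the location dictated by the calling
// convention: forced into a specific register, or stored to its stack slot
// (object values go through move_wide).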
void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
  if (loc->is_register()) {
    param->load_item_force(loc);
  } else {
    LIR_Address* addr = loc->as_address_ptr();
    param->load_for_store(addr->type());
    if (addr->type() == T_OBJECT) {
      __ move_wide(param->result(), addr);
    } else {
      __ move(param->result(), addr);
    }
  }
}

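// Loads all outgoing arguments; the receiver, if present, is handled after the
// other arguments (presumably to keep the live range of its fixed register as
// short as possible).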
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  assert(args->length() == arg_list->length(),
         "args=%d, arg_list=%d", args->length(), arg_list->length());
  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    invoke_load_one_argument(param, loc);
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}


// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    // ...
  __ move(tmp, reg);
}


// Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
    right.dont_load_item();
  } else {
    // substitutability_check() needs to use right as a base register.
    right.load_item();
  }

  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();

  if (x->substitutability_check()) {
    substitutability_check(x, left, right, t_val, f_val);
  } else {
    LIR_Opr reg = rlock_result(x);
    __ cmp(lir_cond(x->cond()), left.result(), right.result());
    __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
  }
}

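// acmp on possibly-inline-type operands in an IfOp context: the
// substitutability outcome decides which of tval/fval is selected, so map the
// eq/neq result onto the IfOp's operands.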
void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
  assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
  bool is_acmpeq = (x->cond() == If::eql);
  LIR_Opr equal_result = is_acmpeq ? t_val.result() : f_val.result();
  LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
  LIR_Opr result = rlock_result(x);
  CodeEmitInfo* info = state_for(x, x->state_before());

  substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
}

void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
  LIR_Opr equal_result = LIR_OprFact::intConst(1);
  LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
  LIR_Opr result = new_register(T_INT);
  CodeEmitInfo* info = state_for(x, x->state_before());

  substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);

  assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
  __ cmp(lir_cond(x->cond()), result, equal_result);
}

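// Shared tail of the two substitutability checks above. Registers are only
// allocated when they can actually be needed: if both operands are statically
// known to be the same inline klass, the klass loads are skipped entirely, and
// the temps are only set up when an operand might not be an inline type. The
// stub performs the full substitutability test at runtime.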
void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
                                                 LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
                                                 CodeEmitInfo* info) {
  LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
  LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
  LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;

  ciKlass* left_klass = left_val ->as_loaded_klass_or_null();
  ciKlass* right_klass = right_val->as_loaded_klass_or_null();

  if ((left_klass == nullptr || right_klass == nullptr) || // the klass is still unloaded, or came from a Phi node
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
    init_temps_for_substitutability_check(tmp1, tmp2);
  }

  if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same inline klass.
  } else {
    BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
    left_klass_op = new_register(t_klass);
    right_klass_op = new_register(t_klass);
  }

  CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
  __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
                            tmp1, tmp2,
                            left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
}

void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  // Enforce computation of _reserved_argument_area_size which is required on some platforms.
  BasicTypeList signature;
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime_leaf(routine, getThreadTemp(),
                       reg, new LIR_OprList());
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat :
  case vmIntrinsics::_doubleToRawLongBits :
    // ...
  if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }

  if (x->recv() != nullptr) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  if (data != nullptr) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = nullptr;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
    // to be used in load/store instructions on some platforms. So have
    // profile_type() compute the address of the profile in a register.
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
                                  ret->type(), x->ret(), mdp,
                                  !x->needs_null_check(),
                                  signature_at_call->return_type()->as_klass(),
                                  x->callee()->signature()->return_type()->as_klass());
    if (exact != nullptr) {
      md->set_return_type(bci, exact);
    }
  }
}

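// Returns true if the inline-type-ness of the value could be decided
// statically: either the flag was recorded (the klass is a loaded inline type)
// or the klass provably cannot be an inline type. A false return means the
// caller must emit a runtime profiling check.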
bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
  ciKlass* klass = value->as_loaded_klass_or_null();
  if (klass != nullptr) {
    if (klass->is_inlinetype()) {
      profile_flags(md, data, flag, lir_cond_always);
    } else if (klass->can_be_inline_klass()) {
      return false;
    }
  } else {
    return false;
  }
  return true;
}


void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
  ciMethod* method = x->method();
  assert(method != nullptr, "method should be set if branch is profiled");
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(x->bci());
  assert(data != nullptr, "must have profiling data");
  assert(data->is_ACmpData(), "need ACmpData to profile acmp");
  ciACmpData* acmp = (ciACmpData*)data;
  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
               acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
  int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
  if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
    LIR_Opr mdp = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), mdp);
    LIRItem value(x->left(), this);
    value.load_item();
    __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
  }
  profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
               in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
               acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
  if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
    LIR_Opr mdp = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), mdp);
    LIRItem value(x->right(), this);
    value.load_item();
    __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
  }
}

void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since C2 will inline them anyway;
  // accessors are also always mature.
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime very infrequently only to take care of counter overflows
    int freq_log = Tier23InlineeNotifyFreqLog;
    double scale;
    if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
      freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
    }
    increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
  }
}

void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
  if (compilation()->is_profiling()) {
#if defined(X86) && !defined(_LP64)
    // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
    LIR_Opr left_copy = new_register(left->type());