6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/ciUtilities.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "asm/register.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/c2/barrierSetC2.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/graphKit.hpp"
39 #include "opto/idealKit.hpp"
40 #include "opto/intrinsicnode.hpp"
41 #include "opto/locknode.hpp"
42 #include "opto/machnode.hpp"
43 #include "opto/opaquenode.hpp"
44 #include "opto/parse.hpp"
45 #include "opto/rootnode.hpp"
46 #include "opto/runtime.hpp"
47 #include "opto/subtypenode.hpp"
48 #include "runtime/deoptimization.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "utilities/bitMap.inline.hpp"
51 #include "utilities/powerOfTwo.hpp"
52 #include "utilities/growableArray.hpp"
53
54 //----------------------------GraphKit-----------------------------------------
55 // Main utility constructor.
56 GraphKit::GraphKit(JVMState* jvms)
57 : Phase(Phase::Parser),
58 _env(C->env()),
59 _gvn(*C->initial_gvn()),
60 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
61 {
62 _exceptions = jvms->map()->next_exception();
63 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
64 set_jvms(jvms);
65 }
66
67 // Private constructor for parser.
68 GraphKit::GraphKit()
69 : Phase(Phase::Parser),
70 _env(C->env()),
71 _gvn(*C->initial_gvn()),
72 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
73 {
74 _exceptions = nullptr;
75 set_map(nullptr);
76 debug_only(_sp = -99);
77 debug_only(set_bci(-99));
78 }
79
80
81
82 //---------------------------clean_stack---------------------------------------
83 // Clear away rubbish from the stack area of the JVM state.
84 // This destroys any arguments that may be waiting on the stack.
840 if (PrintMiscellaneous && (Verbose || WizardMode)) {
841 tty->print_cr("Zombie local %d: ", local);
842 jvms->dump();
843 }
844 return false;
845 }
846 }
847 }
848 return true;
849 }
850
851 #endif //ASSERT
852
853 // Helper function for deciding whether certain bytecodes should be reexecuted if deoptimization happens.
854 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
855 ciMethod* cur_method = jvms->method();
856 int cur_bci = jvms->bci();
857 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
858 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
859 return Interpreter::bytecode_should_reexecute(code) ||
860 (is_anewarray && code == Bytecodes::_multianewarray);
861 // Reexecute the _multianewarray bytecode, which was replaced with a
862 // sequence of [a]newarray. See Parse::do_multianewarray().
863 //
864 // Note: the interpreter should not have it set since this optimization
865 // is limited by dimensions and guarded by a flag, so in some cases
866 // multianewarray() runtime calls will be generated and
867 // the bytecode should not be reexecuted (the stack will not be reset).
868 } else {
869 return false;
870 }
871 }
872
873 // Helper function for adding JVMState and debug information to a node.
874 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
875 // Add the safepoint edges to the call (or other safepoint).
876
877 // Make sure dead locals are set to top. This
878 // should help register allocation time and cut down on the size
879 // of the deoptimization information.
880 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
956 uint p = debug_start; // walks forward in [debug_start, debug_end)
957 uint j, k, l;
958 SafePointNode* in_map = in_jvms->map();
959 out_jvms->set_map(call);
960
961 if (can_prune_locals) {
962 assert(in_jvms->method() == out_jvms->method(), "sanity");
963 // If the current throw can reach an exception handler in this JVMS,
964 // then we must keep everything live that can reach that handler.
965 // As a quick and dirty approximation, we look for any handlers at all.
966 if (in_jvms->method()->has_exception_handlers()) {
967 can_prune_locals = false;
968 }
969 }
970
971 // Add the Locals
972 k = in_jvms->locoff();
973 l = in_jvms->loc_size();
974 out_jvms->set_locoff(p);
975 if (!can_prune_locals) {
976 for (j = 0; j < l; j++)
977 call->set_req(p++, in_map->in(k+j));
978 } else {
979 p += l; // already set to top above by add_req_batch
980 }
981
982 // Add the Expression Stack
983 k = in_jvms->stkoff();
984 l = in_jvms->sp();
985 out_jvms->set_stkoff(p);
986 if (!can_prune_locals) {
987 for (j = 0; j < l; j++)
988 call->set_req(p++, in_map->in(k+j));
989 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
990 // Divide stack into {S0,...,S1}, where S0 is set to top.
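// For example, with l == 5 live stack slots and stack_slots_not_pruned == 2:
// s1 == 2 and s0 == 3, so slots [0,3) keep the top pre-installed by
// add_req_batch and only slots [3,5) are copied from the incoming map.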
991 uint s1 = stack_slots_not_pruned;
992 stack_slots_not_pruned = 0; // for next iteration
993 if (s1 > l) s1 = l;
994 uint s0 = l - s1;
995 p += s0; // skip the tops preinstalled by add_req_batch
996 for (j = s0; j < l; j++)
997 call->set_req(p++, in_map->in(k+j));
998 } else {
999 p += l; // already set to top above by add_req_batch
1000 }
1001
1002 // Add the Monitors
1003 k = in_jvms->monoff();
1004 l = in_jvms->mon_size();
1005 out_jvms->set_monoff(p);
1006 for (j = 0; j < l; j++)
1007 call->set_req(p++, in_map->in(k+j));
1008
1182 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1183 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
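// The AndL with 0xFFFFFFFF zero-extends the 32-bit offset: e.g. an int
// offset of -1 (0xFFFFFFFF) becomes 4294967295L rather than sign-extending to -1L.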
1184 return _gvn.transform( new AndLNode(conv, mask) );
1185 }
1186
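// Truncate a long to the low 32 bits. A constant input is folded right
// here (via find_long_con), so no ConvL2I node is built for that common case.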
1187 Node* GraphKit::ConvL2I(Node* offset) {
1188 // short-circuit a common case
1189 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1190 if (offset_con != (jlong)Type::OffsetBot) {
1191 return intcon((int) offset_con);
1192 }
1193 return _gvn.transform( new ConvL2INode(offset));
1194 }
1195
1196 //-------------------------load_object_klass-----------------------------------
1197 Node* GraphKit::load_object_klass(Node* obj) {
1198 // Special-case a fresh allocation to avoid building nodes:
1199 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1200 if (akls != nullptr) return akls;
1201 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1202 return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS));
1203 }
1204
1205 //-------------------------load_array_length-----------------------------------
1206 Node* GraphKit::load_array_length(Node* array) {
1207 // Special-case a fresh allocation to avoid building nodes:
1208 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1209 Node *alen;
1210 if (alloc == nullptr) {
1211 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1212 alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1213 } else {
1214 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1215 }
1216 return alen;
1217 }
1218
1219 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1220 const TypeOopPtr* oop_type,
1221 bool replace_length_in_map) {
1222 Node* length = alloc->Ideal_length();
1231 replace_in_map(length, ccast);
1232 }
1233 return ccast;
1234 }
1235 }
1236 return length;
1237 }
1238
1239 //------------------------------do_null_check----------------------------------
1240 // Helper function to do a null pointer check. Returned value is
1241 // the incoming address with null casted away. You are allowed to use the
1242 // not-null value only if you are control dependent on the test.
1243 #ifndef PRODUCT
1244 extern uint explicit_null_checks_inserted,
1245 explicit_null_checks_elided;
1246 #endif
1247 Node* GraphKit::null_check_common(Node* value, BasicType type,
1248 // optional arguments for variations:
1249 bool assert_null,
1250 Node* *null_control,
1251 bool speculative) {
1252 assert(!assert_null || null_control == nullptr, "not both at once");
1253 if (stopped()) return top();
1254 NOT_PRODUCT(explicit_null_checks_inserted++);
1255
1256 // Construct null check
1257 Node *chk = nullptr;
1258 switch(type) {
1259 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1260 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1261 case T_ARRAY : // fall through
1262 type = T_OBJECT; // simplify further tests
1263 case T_OBJECT : {
1264 const Type *t = _gvn.type( value );
1265
1266 const TypeOopPtr* tp = t->isa_oopptr();
1267 if (tp != nullptr && !tp->is_loaded()
1268 // Only for do_null_check, not any of its siblings:
1269 && !assert_null && null_control == nullptr) {
1270 // Usually, any field access or invocation on an unloaded oop type
1271 // will simply fail to link, since the statically linked class is
1272 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1273 // the static class is loaded but the sharper oop type is not.
1274 // Rather than checking for this obscure case in lots of places,
1275 // we simply observe that a null check on an unloaded class
1339 }
1340 Node *oldcontrol = control();
1341 set_control(cfg);
1342 Node *res = cast_not_null(value);
1343 set_control(oldcontrol);
1344 NOT_PRODUCT(explicit_null_checks_elided++);
1345 return res;
1346 }
1347 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1348 if (cfg == nullptr) break; // Quit at region nodes
1349 depth++;
1350 }
1351 }
1352
1353 //-----------
1354 // Branch to failure if null
1355 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1356 Deoptimization::DeoptReason reason;
1357 if (assert_null) {
1358 reason = Deoptimization::reason_null_assert(speculative);
1359 } else if (type == T_OBJECT) {
1360 reason = Deoptimization::reason_null_check(speculative);
1361 } else {
1362 reason = Deoptimization::Reason_div0_check;
1363 }
1364 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1365 // ciMethodData::has_trap_at will return a conservative -1 if any
1366 // must-be-null assertion has failed. This could cause performance
1367 // problems for a method after its first do_null_assert failure.
1368 // Consider using 'Reason_class_check' instead?
1369
1370 // To cause an implicit null check, we set the not-null probability
1371 // to the maximum (PROB_MAX). For an explicit check the probability
1372 // is set to a smaller value.
1373 if (null_control != nullptr || too_many_traps(reason)) {
1374 // probability is less likely
1375 ok_prob = PROB_LIKELY_MAG(3);
1376 } else if (!assert_null &&
1377 (ImplicitNullCheckThreshold > 0) &&
1378 method() != nullptr &&
1379 (method()->method_data()->trap_count(reason)
1413 }
1414
1415 if (assert_null) {
1416 // Cast obj to null on this path.
1417 replace_in_map(value, zerocon(type));
1418 return zerocon(type);
1419 }
1420
1421 // Cast obj to not-null on this path, if there is no null_control.
1422 // (If there is a null_control, a non-null value may come back to haunt us.)
1423 if (type == T_OBJECT) {
1424 Node* cast = cast_not_null(value, false);
1425 if (null_control == nullptr || (*null_control) == top())
1426 replace_in_map(value, cast);
1427 value = cast;
1428 }
1429
1430 return value;
1431 }
1432
1433
1434 //------------------------------cast_not_null----------------------------------
1435 // Cast obj to not-null on this path
1436 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1437 const Type *t = _gvn.type(obj);
1438 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1439 // Object is already not-null?
1440 if( t == t_not_null ) return obj;
1441
1442 Node *cast = new CastPPNode(obj,t_not_null);
1443 cast->init_req(0, control());
1444 cast = _gvn.transform( cast );
1445
1446 // Scan for instances of 'obj' in the current JVM mapping.
1447 // These instances are known to be not-null after the test.
1448 if (do_replace_in_map)
1449 replace_in_map(obj, cast);
1450
1451 return cast; // Return casted value
1452 }
1453
1454 // Sometimes in intrinsics, we implicitly know an object is not null
1455 // (there's no actual null check) so we can cast it to not null. In
1456 // the course of optimizations, the input to the cast can become null.
1543 // These are layered on top of the factory methods in LoadNode and StoreNode,
1544 // and integrate with the parser's memory state and _gvn engine.
1545 //
1546
1547 // factory methods in "int adr_idx"
1548 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1549 int adr_idx,
1550 MemNode::MemOrd mo,
1551 LoadNode::ControlDependency control_dependency,
1552 bool require_atomic_access,
1553 bool unaligned,
1554 bool mismatched,
1555 bool unsafe,
1556 uint8_t barrier_data) {
1557 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1558 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1559 debug_only(adr_type = C->get_adr_type(adr_idx));
1560 Node* mem = memory(adr_idx);
1561 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1562 ld = _gvn.transform(ld);
1563 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1564 // Improve graph before escape analysis and boxing elimination.
1565 record_for_igvn(ld);
1566 }
1567 return ld;
1568 }
1569
1570 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1571 int adr_idx,
1572 MemNode::MemOrd mo,
1573 bool require_atomic_access,
1574 bool unaligned,
1575 bool mismatched,
1576 bool unsafe,
1577 int barrier_data) {
1578 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1579 const TypePtr* adr_type = nullptr;
1580 debug_only(adr_type = C->get_adr_type(adr_idx));
1581 Node *mem = memory(adr_idx);
1582 Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
1589 if (unsafe) {
1590 st->as_Store()->set_unsafe_access();
1591 }
1592 st->as_Store()->set_barrier_data(barrier_data);
1593 st = _gvn.transform(st);
1594 set_memory(st, adr_idx);
1595 // Back-to-back stores can only remove the intermediate store with DU info,
1596 // so push on the worklist for the optimizer.
1597 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1598 record_for_igvn(st);
1599
1600 return st;
1601 }
1602
1603 Node* GraphKit::access_store_at(Node* obj,
1604 Node* adr,
1605 const TypePtr* adr_type,
1606 Node* val,
1607 const Type* val_type,
1608 BasicType bt,
1609 DecoratorSet decorators) {
1610 // The transformation of a value which could be a null pointer (CastPP #null)
1611 // could be delayed during Parse (for example, in adjust_map_after_if()).
1612 // Execute the transformation here to avoid barrier generation in that case.
1613 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1614 val = _gvn.makecon(TypePtr::NULL_PTR);
1615 }
1616
1617 if (stopped()) {
1618 return top(); // Dead path ?
1619 }
1620
1621 assert(val != nullptr, "not dead path");
1622
1623 C2AccessValuePtr addr(adr, adr_type);
1624 C2AccessValue value(val, val_type);
1625 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
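// A raw access bypasses GC barriers entirely: the qualified call below
// dispatches non-virtually to the shared BarrierSetC2 base implementation
// instead of the GC-specific override.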
1626 if (access.is_raw()) {
1627 return _barrier_set->BarrierSetC2::store_at(access, value);
1628 } else {
1629 return _barrier_set->store_at(access, value);
1630 }
1631 }
1632
1633 Node* GraphKit::access_load_at(Node* obj, // containing obj
1634 Node* adr, // actual address to load val from
1635 const TypePtr* adr_type,
1636 const Type* val_type,
1637 BasicType bt,
1638 DecoratorSet decorators) {
1639 if (stopped()) {
1640 return top(); // Dead path ?
1641 }
1642
1643 C2AccessValuePtr addr(adr, adr_type);
1644 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
1645 if (access.is_raw()) {
1646 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1647 } else {
1648 return _barrier_set->load_at(access, val_type);
1649 }
1650 }
1651
1652 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1653 const Type* val_type,
1654 BasicType bt,
1655 DecoratorSet decorators) {
1656 if (stopped()) {
1657 return top(); // Dead path ?
1658 }
1659
1660 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1661 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1662 if (access.is_raw()) {
1663 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1664 } else {
1729 Node* new_val,
1730 const Type* value_type,
1731 BasicType bt,
1732 DecoratorSet decorators) {
1733 C2AccessValuePtr addr(adr, adr_type);
1734 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1735 if (access.is_raw()) {
1736 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1737 } else {
1738 return _barrier_set->atomic_add_at(access, new_val, value_type);
1739 }
1740 }
1741
1742 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1743 return _barrier_set->clone(this, src, dst, size, is_array);
1744 }
1745
1746 //-------------------------array_element_address-------------------------
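// Computes &ary[idx] = ary + base_offset_in_bytes(elembt) + (idx << log2(element size)).
// For example, for a T_INT array shift == 2, so a constant idx folds to
// ary + header + 4*idx (the header size depends on the platform and on
// compressed-oops/compact-headers settings).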
1747 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1748 const TypeInt* sizetype, Node* ctrl) {
1749 uint shift = exact_log2(type2aelembytes(elembt));
1750 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1751
1752 // short-circuit a common case (saves lots of confusing waste motion)
1753 jint idx_con = find_int_con(idx, -1);
1754 if (idx_con >= 0) {
1755 intptr_t offset = header + ((intptr_t)idx_con << shift);
1756 return basic_plus_adr(ary, offset);
1757 }
1758
1759 // must be correct type for alignment purposes
1760 Node* base = basic_plus_adr(ary, header);
1761 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1762 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1763 return basic_plus_adr(ary, base, scale);
1764 }
1765
1766 //-------------------------load_array_element-------------------------
1767 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1768 const Type* elemtype = arytype->elem();
1769 BasicType elembt = elemtype->array_element_basic_type();
1770 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1771 if (elembt == T_NARROWOOP) {
1772 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1773 }
1774 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1775 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1776 return ld;
1777 }
1778
1779 //-------------------------set_arguments_for_java_call-------------------------
1780 // Arguments (pre-popped from the stack) are taken from the JVMS.
1781 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1782 // Add the call arguments:
1783 uint nargs = call->method()->arg_size();
1784 for (uint i = 0; i < nargs; i++) {
1785 Node* arg = argument(i);
1786 call->init_req(i + TypeFunc::Parms, arg);
1787 }
1788 }
1789
1790 //---------------------------set_edges_for_java_call---------------------------
1791 // Connect a newly created call into the current JVMS.
1792 // A return value node (if any) is produced later, by set_results_for_java_call.
1793 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1794
1795 // Add the predefined inputs:
1796 call->init_req( TypeFunc::Control, control() );
1797 call->init_req( TypeFunc::I_O , i_o() );
1798 call->init_req( TypeFunc::Memory , reset_memory() );
1799 call->init_req( TypeFunc::FramePtr, frameptr() );
1800 call->init_req( TypeFunc::ReturnAdr, top() );
1801
1802 add_safepoint_edges(call, must_throw);
1803
1804 Node* xcall = _gvn.transform(call);
1805
1806 if (xcall == top()) {
1807 set_control(top());
1808 return;
1809 }
1810 assert(xcall == call, "call identity is stable");
1811
1812 // Re-use the current map to produce the result.
1813
1814 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1815 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1816 set_all_memory_call(xcall, separate_io_proj);
1817
1818 //return xcall; // no need, caller already has it
1819 }
1820
1821 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1822 if (stopped()) return top(); // maybe the call folded up?
1823
1824 // Capture the return value, if any.
1825 Node* ret;
1826 if (call->method() == nullptr ||
1827 call->method()->return_type()->basic_type() == T_VOID)
1828 ret = top();
1829 else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1830
1831 // Note: Since any out-of-line call can produce an exception,
1832 // we always insert an I_O projection from the call into the result.
1833
1834 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1835
1836 if (separate_io_proj) {
1837 // The caller requested separate projections be used by the fall
1838 // through and exceptional paths, so replace the projections for
1839 // the fall through path.
1840 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1841 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1842 }
1843 return ret;
1844 }
1845
1846 //--------------------set_predefined_input_for_runtime_call--------------------
1847 // Reading and setting the memory state is way conservative here.
1848 // The real problem is that I am not doing real Type analysis on memory,
1849 // so I cannot distinguish card mark stores from other stores. Across a GC
1850 // point the Store Barrier and the card mark memory has to agree. I cannot
1851 // have a card mark store and its barrier split across the GC point from
1852 // either above or below. Here I get that to happen by reading ALL of memory.
1853 // A better answer would be to separate out card marks from other memory.
1854 // For now, return the input memory state, so that it can be reused
1855 // after the call, if this call has restricted memory effects.
1856 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1857 // Set fixed predefined input arguments
1858 Node* memory = reset_memory();
1859 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
1860 call->init_req( TypeFunc::Control, control() );
1861 call->init_req( TypeFunc::I_O, top() ); // does no i/o
1862 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
1913 if (use->is_MergeMem()) {
1914 wl.push(use);
1915 }
1916 }
1917 }
1918
1919 // Replace the call with the current state of the kit.
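// The kit is expected to hold the final state of the inlined replacement:
// its control/memory/i_o feed the call's fall-through projections, and any
// accumulated exception state replaces the catch-all projections.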
1920 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
1921 JVMState* ejvms = nullptr;
1922 if (has_exceptions()) {
1923 ejvms = transfer_exceptions_into_jvms();
1924 }
1925
1926 ReplacedNodes replaced_nodes = map()->replaced_nodes();
1927 ReplacedNodes replaced_nodes_exception;
1928 Node* ex_ctl = top();
1929
1930 SafePointNode* final_state = stop();
1931
1932 // Find all the needed outputs of this call
1933 CallProjections callprojs;
1934 call->extract_projections(&callprojs, true);
1935
1936 Unique_Node_List wl;
1937 Node* init_mem = call->in(TypeFunc::Memory);
1938 Node* final_mem = final_state->in(TypeFunc::Memory);
1939 Node* final_ctl = final_state->in(TypeFunc::Control);
1940 Node* final_io = final_state->in(TypeFunc::I_O);
1941
1942 // Replace all the old call edges with the edges from the inlining result
1943 if (callprojs.fallthrough_catchproj != nullptr) {
1944 C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
1945 }
1946 if (callprojs.fallthrough_memproj != nullptr) {
1947 if (final_mem->is_MergeMem()) {
1948 // The parser's exit MergeMem was not transformed but may be optimized
1949 final_mem = _gvn.transform(final_mem);
1950 }
1951 C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
1952 add_mergemem_users_to_worklist(wl, final_mem);
1953 }
1954 if (callprojs.fallthrough_ioproj != nullptr) {
1955 C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
1956 }
1957
1958 // Replace the result with the new result if it exists and is used
1959 if (callprojs.resproj != nullptr && result != nullptr) {
1960 C->gvn_replace_by(callprojs.resproj, result);
1961 }
1962
1963 if (ejvms == nullptr) {
1964 // No exception edges, so simply kill off those paths
1965 if (callprojs.catchall_catchproj != nullptr) {
1966 C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
1967 }
1968 if (callprojs.catchall_memproj != nullptr) {
1969 C->gvn_replace_by(callprojs.catchall_memproj, C->top());
1970 }
1971 if (callprojs.catchall_ioproj != nullptr) {
1972 C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
1973 }
1974 // Replace the old exception object with top
1975 if (callprojs.exobj != nullptr) {
1976 C->gvn_replace_by(callprojs.exobj, C->top());
1977 }
1978 } else {
1979 GraphKit ekit(ejvms);
1980
1981 // Load my combined exception state into the kit, with all phis transformed:
1982 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
1983 replaced_nodes_exception = ex_map->replaced_nodes();
1984
1985 Node* ex_oop = ekit.use_exception_state(ex_map);
1986
1987 if (callprojs.catchall_catchproj != nullptr) {
1988 C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
1989 ex_ctl = ekit.control();
1990 }
1991 if (callprojs.catchall_memproj != nullptr) {
1992 Node* ex_mem = ekit.reset_memory();
1993 C->gvn_replace_by(callprojs.catchall_memproj, ex_mem);
1994 add_mergemem_users_to_worklist(wl, ex_mem);
1995 }
1996 if (callprojs.catchall_ioproj != nullptr) {
1997 C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
1998 }
1999
2000 // Replace the old exception object with the newly created one
2001 if (callprojs.exobj != nullptr) {
2002 C->gvn_replace_by(callprojs.exobj, ex_oop);
2003 }
2004 }
2005
2006 // Disconnect the call from the graph
2007 call->disconnect_inputs(C);
2008 C->gvn_replace_by(call, C->top());
2009
2010 // Clean up any MergeMems that feed other MergeMems since the
2011 // optimizer doesn't like that.
2012 while (wl.size() > 0) {
2013 _gvn.transform(wl.pop());
2014 }
2015
2016 if (callprojs.fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2017 replaced_nodes.apply(C, final_ctl);
2018 }
2019 if (!ex_ctl->is_top() && do_replaced_nodes) {
2020 replaced_nodes_exception.apply(C, ex_ctl);
2021 }
2022 }
2023
2024
2025 //------------------------------increment_counter------------------------------
2026 // for statistics: increment a VM counter by 1
2027
2028 void GraphKit::increment_counter(address counter_addr) {
2029 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2030 increment_counter(adr1);
2031 }
2032
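// Presumably a plain load/increment/store on the raw memory slice: cheap but
// not atomic, so concurrent updates may be lost (acceptable for statistics).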
2033 void GraphKit::increment_counter(Node* counter_addr) {
2034 int adr_type = Compile::AliasIdxRaw;
2035 Node* ctrl = control();
2036 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
2195 *
2196 * @param n node that the type applies to
2197 * @param exact_kls type from profiling
2198 * @param ptr_kind did profiling see null?
2199 *
2200 * @return node with improved type
2201 */
2202 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2203 const Type* current_type = _gvn.type(n);
2204 assert(UseTypeSpeculation, "type speculation must be on");
2205
2206 const TypePtr* speculative = current_type->speculative();
2207
2208 // Should the klass from the profile be recorded in the speculative type?
2209 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2210 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2211 const TypeOopPtr* xtype = tklass->as_instance_type();
2212 assert(xtype->klass_is_exact(), "Should be exact");
2213 // Any reason to believe n is not null (from this profiling or a previous one)?
2214 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2215 const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2216 // record the new speculative type's depth
2217 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2218 speculative = speculative->with_inline_depth(jvms()->depth());
2219 } else if (current_type->would_improve_ptr(ptr_kind)) {
2220 // Profiling reports that null was never seen, so we can change the
2221 // speculative type to a non-null ptr.
2222 if (ptr_kind == ProfileAlwaysNull) {
2223 speculative = TypePtr::NULL_PTR;
2224 } else {
2225 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2226 const TypePtr* ptr = TypePtr::NOTNULL;
2227 if (speculative != nullptr) {
2228 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2229 } else {
2230 speculative = ptr;
2231 }
2232 }
2233 }
2234
2235 if (speculative != current_type->speculative()) {
2236 // Build a type with a speculative type (what we think we know
2237 // about the type but will need a guard when we use it)
2238 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
2239 // We're changing the type, we need a new CheckCast node to carry
2240 // the new type. The new type depends on the control: what
2241 // profiling tells us is only valid from here as far as we can
2242 // tell.
2243 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2244 cast = _gvn.transform(cast);
2245 replace_in_map(n, cast);
2246 n = cast;
2247 }
2248
2249 return n;
2250 }
2251
2252 /**
2253 * Record profiling data from receiver profiling at an invoke with the
2254 * type system so that it can propagate it (speculation)
2255 *
2256 * @param n receiver node
2257 *
2258 * @return node with improved type
2259 */
2260 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2261 if (!UseTypeSpeculation) {
2262 return n;
2263 }
2264 ciKlass* exact_kls = profile_has_unique_klass();
2265 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2266 if ((java_bc() == Bytecodes::_checkcast ||
2267 java_bc() == Bytecodes::_instanceof ||
2268 java_bc() == Bytecodes::_aastore) &&
2269 method()->method_data()->is_mature()) {
2270 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2271 if (data != nullptr) {
2272 if (!data->as_BitData()->null_seen()) {
2273 ptr_kind = ProfileNeverNull;
2274 } else {
2275 assert(data->is_ReceiverTypeData(), "bad profile data type");
2276 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2277 uint i = 0;
2278 for (; i < call->row_limit(); i++) {
2279 ciKlass* receiver = call->receiver(i);
2280 if (receiver != nullptr) {
2281 break;
2282 }
2283 }
2284 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2285 }
2286 }
2287 }
2288 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2289 }
2290
2291 /**
2292 * Record profiling data from argument profiling at an invoke with the
2293 * type system so that it can propagate it (speculation)
2294 *
2295 * @param dest_method target method for the call
2296 * @param bc what invoke bytecode is this?
2297 */
2298 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2299 if (!UseTypeSpeculation) {
2300 return;
2301 }
2302 const TypeFunc* tf = TypeFunc::make(dest_method);
2303 int nargs = tf->domain()->cnt() - TypeFunc::Parms;
2304 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2305 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2306 const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2307 if (is_reference_type(targ->basic_type())) {
2308 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2309 ciKlass* better_type = nullptr;
2310 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2311 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2312 }
2313 i++;
2314 }
2315 }
2316 }
2317
2318 /**
2319 * Record profiling data from parameter profiling at an invoke with
2320 * the type system so that it can propagate it (speculation)
2321 */
2322 void GraphKit::record_profiled_parameters_for_speculation() {
2323 if (!UseTypeSpeculation) {
2324 return;
2325 }
2326 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2340 * the type system so that it can propagate it (speculation)
2341 */
2342 void GraphKit::record_profiled_return_for_speculation() {
2343 if (!UseTypeSpeculation) {
2344 return;
2345 }
2346 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2347 ciKlass* better_type = nullptr;
2348 if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2349 // If profiling reports a single type for the return value,
2350 // feed it to the type system so it can propagate it as a
2351 // speculative type
2352 record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2353 }
2354 }
2355
2356 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2357 if (Matcher::strict_fp_requires_explicit_rounding) {
2358 // (Note: TypeFunc::make has a cache that makes this fast.)
2359 const TypeFunc* tf = TypeFunc::make(dest_method);
2360 int nargs = tf->domain()->cnt() - TypeFunc::Parms;
2361 for (int j = 0; j < nargs; j++) {
2362 const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2363 if (targ->basic_type() == T_DOUBLE) {
2364 // If any parameters are doubles, they must be rounded before
2365 // the call; dprecision_rounding does the gvn.transform.
2366 Node *arg = argument(j);
2367 arg = dprecision_rounding(arg);
2368 set_argument(j, arg);
2369 }
2370 }
2371 }
2372 }
2373
2374 // rounding for strict float precision conformance
2375 Node* GraphKit::precision_rounding(Node* n) {
2376 if (Matcher::strict_fp_requires_explicit_rounding) {
2377 #ifdef IA32
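// On x87 (UseSSE == 0) float intermediates are kept in 80-bit registers,
// so an explicit RoundFloat is needed to clip the value to 32-bit precision.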
2378 if (UseSSE == 0) {
2379 return _gvn.transform(new RoundFloatNode(0, n));
2380 }
2381 #else
2382 Unimplemented();
2491 // The first null ends the list.
2492 Node* parm0, Node* parm1,
2493 Node* parm2, Node* parm3,
2494 Node* parm4, Node* parm5,
2495 Node* parm6, Node* parm7) {
2496 assert(call_addr != nullptr, "must not call null targets");
2497
2498 // Slow-path call
2499 bool is_leaf = !(flags & RC_NO_LEAF);
2500 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2501 if (call_name == nullptr) {
2502 assert(!is_leaf, "must supply name for leaf");
2503 call_name = OptoRuntime::stub_name(call_addr);
2504 }
2505 CallNode* call;
2506 if (!is_leaf) {
2507 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2508 } else if (flags & RC_NO_FP) {
2509 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2510 } else if (flags & RC_VECTOR){
2511 uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2512 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2513 } else {
2514 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2515 }
2516
2517 // The following is similar to set_edges_for_java_call,
2518 // except that the memory effects of the call are restricted to AliasIdxRaw.
2519
2520 // Slow path call has no side-effects, uses few values
2521 bool wide_in = !(flags & RC_NARROW_MEM);
2522 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2523
2524 Node* prev_mem = nullptr;
2525 if (wide_in) {
2526 prev_mem = set_predefined_input_for_runtime_call(call);
2527 } else {
2528 assert(!wide_out, "narrow in => narrow out");
2529 Node* narrow_mem = memory(adr_type);
2530 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2531 }
2571
2572 if (has_io) {
2573 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2574 }
2575 return call;
2576
2577 }
2578
2579 // i2b
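// Shift left then arithmetic-shift right by 24 replicates bit 7:
// e.g. in == 0x000000F0 -> 0xF0000000 -> 0xFFFFFFF0, i.e. (byte)-16.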
2580 Node* GraphKit::sign_extend_byte(Node* in) {
2581 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2582 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2583 }
2584
2585 // i2s
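// Same trick for 16 bits:
// e.g. in == 0x00008000 -> 0x80000000 -> 0xFFFF8000, i.e. (short)-32768.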
2586 Node* GraphKit::sign_extend_short(Node* in) {
2587 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2588 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2589 }
2590
2591 //------------------------------merge_memory-----------------------------------
2592 // Merge memory from one path into the current memory state.
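// For each memory slice where the two states differ, merge via a Phi on
// `region`; slices that are identical in both states are left untouched.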
2593 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2594 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2595 Node* old_slice = mms.force_memory();
2596 Node* new_slice = mms.memory2();
2597 if (old_slice != new_slice) {
2598 PhiNode* phi;
2599 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2600 if (mms.is_empty()) {
2601 // clone base memory Phi's inputs for this memory slice
2602 assert(old_slice == mms.base_memory(), "sanity");
2603 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2604 _gvn.set_type(phi, Type::MEMORY);
2605 for (uint i = 1; i < phi->req(); i++) {
2606 phi->init_req(i, old_slice->in(i));
2607 }
2608 } else {
2609 phi = old_slice->as_Phi(); // Phi was generated already
2610 }
2867
2868 // Now do a linear scan of the secondary super-klass array. Again, no real
2869 // performance impact (too rare) but it's gotta be done.
2870 // Since the code is rarely used, there is no penalty for moving it
2871 // out of line, and it can only improve I-cache density.
2872 // The decision to inline or out-of-line this final check is platform
2873 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
2874 Node* psc = gvn.transform(
2875 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
2876
2877 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
2878 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
2879 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
2880
2881 // Return false path; set default control to true path.
2882 *ctrl = gvn.transform(r_ok_subtype);
2883 return gvn.transform(r_not_subtype);
2884 }
2885
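// Emit a subtype check, either expanded inline right away (after loop opts,
// or when forced by ExpandSubTypeCheckAtParseTime) or as a SubTypeCheck
// macro node that is expanded later. Returns the not-subtype control;
// on return, control() is set to the subtype-success path.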
2886 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
2887 bool expand_subtype_check = C->post_loop_opts_phase() || // macro node expansion is over
2888 ExpandSubTypeCheckAtParseTime; // forced expansion
2889 if (expand_subtype_check) {
2890 MergeMemNode* mem = merged_memory();
2891 Node* ctrl = control();
2892 Node* subklass = obj_or_subklass;
2893 if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
2894 subklass = load_object_klass(obj_or_subklass);
2895 }
2896
2897 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
2898 set_control(ctrl);
2899 return n;
2900 }
2901
2902 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
2903 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
2904 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2905 set_control(_gvn.transform(new IfTrueNode(iff)));
2906 return _gvn.transform(new IfFalseNode(iff));
2907 }
2908
2909 // Profile-driven exact type check:
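// Returns the control on which the receiver's klass is NOT the expected
// exact klass; on the success path, *casted_receiver holds the receiver
// casted to that exact type (the caller must do the replace_in_map).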
2910 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
2911 float prob,
2912 Node* *casted_receiver) {
2913 assert(!klass->is_interface(), "no exact type check on interfaces");
2914
2915 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
2916 Node* recv_klass = load_object_klass(receiver);
2917 Node* want_klass = makecon(tklass);
2918 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
2919 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
2920 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2921 set_control( _gvn.transform(new IfTrueNode (iff)));
2922 Node* fail = _gvn.transform(new IfFalseNode(iff));
2923
2924 if (!stopped()) {
2925 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
2926 const TypeOopPtr* recvx_type = tklass->as_instance_type();
2927 assert(recvx_type->klass_is_exact(), "");
2928
2929 if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
2930 // Subsume downstream occurrences of receiver with a cast to
2931 // recv_xtype, since now we know what the type will be.
2932 Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
2933 (*casted_receiver) = _gvn.transform(cast);
2934 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
2935 // (User must make the replace_in_map call.)
2936 }
2937 }
2938
2939 return fail;
2940 }
2941
2942 //------------------------------subtype_check_receiver-------------------------
2943 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
2944 Node** casted_receiver) {
2945 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
2946 Node* want_klass = makecon(tklass);
2947
2948 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
2949
2950 // Ignore interface type information until interface types are properly tracked.
2951 if (!stopped() && !klass->is_interface()) {
2952 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
2953 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
2954 if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
2955 Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
2956 (*casted_receiver) = _gvn.transform(cast);
2957 }
2958 }
2959
2960 return slow_ctl;
2961 }
2962
2963 //------------------------------seems_never_null-------------------------------
2964 // Use null_seen information if it is available from the profile.
2965 // If we see an unexpected null at a type check we record it and force a
2966 // recompile; the offending check will be recompiled to handle nulls.
2967 // If we see several offending BCIs, then all checks in the
2968 // method will be recompiled.
2969 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
2970 speculating = !_gvn.type(obj)->speculative_maybe_null();
2971 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
2972 if (UncommonNullCast // Cutout for this technique
2973 && obj != null() // And not the -Xcomp stupid case?
2974 && !too_many_traps(reason)
2975 ) {
2976 if (speculating) {
3045
3046 //------------------------maybe_cast_profiled_receiver-------------------------
3047 // If the profile has seen exactly one type, narrow to exactly that type.
3048 // Subsequent type checks will always fold up.
3049 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3050 const TypeKlassPtr* require_klass,
3051 ciKlass* spec_klass,
3052 bool safe_for_replace) {
3053 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3054
3055 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3056
3057 // Make sure we haven't already deoptimized from this tactic.
3058 if (too_many_traps_or_recompiles(reason))
3059 return nullptr;
3060
3061 // (No, this isn't a call, but it's enough like a virtual call
3062 // to use the same ciMethod accessor to get the profile info...)
3063 // If we have a speculative type use it instead of profiling (which
3064 // may not help us)
3065 ciKlass* exact_kls = spec_klass == nullptr ? profile_has_unique_klass() : spec_klass;
3066 if (exact_kls != nullptr) { // no cast failures here
3067 if (require_klass == nullptr ||
3068 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3069 // If we narrow the type to match what the type profile sees or
3070 // the speculative type, we can then remove the rest of the
3071 // cast.
3072 // This is a win, even if the exact_kls is very specific,
3073 // because downstream operations, such as method calls,
3074 // will often benefit from the sharper type.
3075 Node* exact_obj = not_null_obj; // will get updated in place...
3076 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3077 &exact_obj);
3078 { PreserveJVMState pjvms(this);
3079 set_control(slow_ctl);
3080 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3081 }
3082 if (safe_for_replace) {
3083 replace_in_map(not_null_obj, exact_obj);
3084 }
3085 return exact_obj;
3175 // If not_null_obj is dead, only null-path is taken
3176 if (stopped()) { // Doing instance-of on a null?
3177 set_control(null_ctl);
3178 return intcon(0);
3179 }
3180 region->init_req(_null_path, null_ctl);
3181 phi ->init_req(_null_path, intcon(0)); // Set null path value
3182 if (null_ctl == top()) {
3183 // Do this eagerly, so that pattern matches like is_diamond_phi
3184 // will work even during parsing.
3185 assert(_null_path == PATH_LIMIT-1, "delete last");
3186 region->del_req(_null_path);
3187 phi ->del_req(_null_path);
3188 }
3189
3190 // Do we know the type check always succeeds?
3191 bool known_statically = false;
3192 if (_gvn.type(superklass)->singleton()) {
3193 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3194 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3195 if (subk->is_loaded()) {
3196 int static_res = C->static_subtype_check(superk, subk);
3197 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3198 }
3199 }
3200
3201 if (!known_statically) {
3202 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3203 // We may not have profiling here or it may not help us. If we
3204 // have a speculative type use it to perform an exact cast.
3205 ciKlass* spec_obj_type = obj_type->speculative_type();
3206 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3207 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3208 if (stopped()) { // Profile disagrees with this path.
3209 set_control(null_ctl); // Null is the only remaining possibility.
3210 return intcon(0);
3211 }
3212 if (cast_obj != nullptr) {
3213 not_null_obj = cast_obj;
3214 }
3215 }
3231 record_for_igvn(region);
3232
3233 // If we know the type check always succeeds then we don't use the
3234 // profiling data at this bytecode. Don't lose it, feed it to the
3235 // type system as a speculative type.
3236 if (safe_for_replace) {
3237 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3238 replace_in_map(obj, casted_obj);
3239 }
3240
3241 return _gvn.transform(phi);
3242 }
3243
3244 //-------------------------------gen_checkcast---------------------------------
3245 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3246 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3247 // uncommon-trap paths work. Adjust stack after this call.
3248 // If failure_control is supplied and not null, it is filled in with
3249 // the control edge for the cast failure. Otherwise, an appropriate
3250 // uncommon trap or exception is thrown.
3251 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
3252 Node* *failure_control) {
3253 kill_dead_locals(); // Benefit all the uncommon traps
3254 const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr()->try_improve();
3255 const TypeOopPtr *toop = tk->cast_to_exactness(false)->as_instance_type();
3256
3257 // Fast cutout: Check the case that the cast is vacuously true.
3258 // This detects the common cases where the test will short-circuit
3259 // away completely. We do this before we perform the null check,
3260 // because if the test is going to turn into zero code, we don't
3261 // want a residual null check left around. (Causes a slowdown,
3262 // for example, in some objArray manipulations, such as a[i]=a[j].)
3263 if (tk->singleton()) {
3264 const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3265 if (objtp != nullptr) {
3266 switch (C->static_subtype_check(tk, objtp->as_klass_type())) {
3267 case Compile::SSC_always_true:
3268 // If we know the type check always succeeds then we don't use
3269 // the profiling data at this bytecode. Don't lose it, feed it
3270 // to the type system as a speculative type.
3271 return record_profiled_receiver_for_speculation(obj);
3272 case Compile::SSC_always_false:
3273 // It needs a null check because a null will *pass* the cast check.
3274 // A non-null value will always produce an exception.
3275 if (!objtp->maybe_null()) {
3276 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3277 Deoptimization::DeoptReason reason = is_aastore ?
3278 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3279 builtin_throw(reason);
3280 return top();
3281 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3282 return null_assert(obj);
3283 }
3284 break; // Fall through to full check
3285 default:
3286 break;
3287 }
3288 }
3289 }
3290
3291 ciProfileData* data = nullptr;
3292 bool safe_for_replace = false;
3293 if (failure_control == nullptr) { // use MDO in regular case only
3294 assert(java_bc() == Bytecodes::_aastore ||
3295 java_bc() == Bytecodes::_checkcast,
3296 "interpreter profiles type checks only for these BCs");
3297 data = method()->method_data()->bci_to_data(bci());
3298 safe_for_replace = true;
3299 }
3300
3301 // Make the merge point
3302 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3303 RegionNode* region = new RegionNode(PATH_LIMIT);
3304 Node* phi = new PhiNode(region, toop);
3305 C->set_has_split_ifs(true); // Has chance for split-if optimization
3306
3307 // Use null-cast information if it is available
3308 bool speculative_not_null = false;
3309 bool never_see_null = ((failure_control == nullptr) // regular case only
3310 && seems_never_null(obj, data, speculative_not_null));
3311
3312 // Null check; get casted pointer; set region slot 3
3313 Node* null_ctl = top();
3314 Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3315
3316 // If not_null_obj is dead, only null-path is taken
3317 if (stopped()) { // Doing instance-of on a null?
3318 set_control(null_ctl);
3319 return null();
3320 }
3321 region->init_req(_null_path, null_ctl);
3322 phi ->init_req(_null_path, null()); // Set null path value
3323 if (null_ctl == top()) {
3324 // Do this eagerly, so that pattern matches like is_diamond_phi
3325 // will work even during parsing.
3326 assert(_null_path == PATH_LIMIT-1, "delete last");
3327 region->del_req(_null_path);
3328 phi ->del_req(_null_path);
3329 }
3330
3331 Node* cast_obj = nullptr;
3332 if (tk->klass_is_exact()) {
3333 // The following optimization tries to statically cast the speculative type of the object
3334 // (for example obtained during profiling) to the type of the superklass and then do a
3335 // dynamic check that the type of the object is what we expect. To work correctly
3336 // for checkcast and aastore the type of superklass should be exact.
3337 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3338 // We may not have profiling here or it may not help us. If we have
3339 // a speculative type use it to perform an exact cast.
3340 ciKlass* spec_obj_type = obj_type->speculative_type();
3341 if (spec_obj_type != nullptr || data != nullptr) {
3342 cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk, spec_obj_type, safe_for_replace);
3343 if (cast_obj != nullptr) {
3344 if (failure_control != nullptr) // failure is now impossible
3345 (*failure_control) = top();
3346 // adjust the type of the phi to the exact klass:
3347 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3348 }
3349 }
3350 }
3351
3352 if (cast_obj == nullptr) {
3353 // Generate the subtype check
3354 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass );
3355
3356 // Plug in success path into the merge
3357 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3358 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3359 if (failure_control == nullptr) {
3360 if (not_subtype_ctrl != top()) { // If failure is possible
3361 PreserveJVMState pjvms(this);
3362 set_control(not_subtype_ctrl);
3363 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3364 Deoptimization::DeoptReason reason = is_aastore ?
3365 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3366 builtin_throw(reason);
3367 }
3368 } else {
3369 (*failure_control) = not_subtype_ctrl;
3370 }
3371 }
3372
3373 region->init_req(_obj_path, control());
3374 phi ->init_req(_obj_path, cast_obj);
3375
3376 // A merge of null or Casted-NotNull obj
3377 Node* res = _gvn.transform(phi);
3378
3379 // Note I do NOT always 'replace_in_map(obj,result)' here.
3380 // if( tk->klass()->can_be_primary_super() )
3381 // This means that if I successfully store an Object into an array-of-String
3382 // I 'forget' that the Object is really now known to be a String. I have to
3383 // do this because we don't have true union types for interfaces - if I store
3384 // a Baz into an array-of-Interface and then tell the optimizer it's an
3385 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3386 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3387 // replace_in_map( obj, res );
3388
3389 // Return final merged results
3390 set_control( _gvn.transform(region) );
3391 record_for_igvn(region);
3392
3393 return record_profiled_receiver_for_speculation(res);
3394 }
3395
3396 //------------------------------next_monitor-----------------------------------
3397 // What number should be given to the next monitor?
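// Monitor boxes live at fixed stack-slot offsets: with S == C->sync_stack_slots(),
// the n-th nested monitor gets offset n*S (0, S, 2*S, ...), and fixed_slots
// tracks the high-water mark.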
3398 int GraphKit::next_monitor() {
3399 int current = jvms()->monitor_depth() * C->sync_stack_slots();
3400 int next = current + C->sync_stack_slots();
3401 // Keep the toplevel high water mark current:
3402 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3403 return current;
3404 }
3405
3406 //------------------------------insert_mem_bar---------------------------------
3407 // Memory barrier to avoid floating things around
3408 // The membar serves as a pinch point between both control and all memory slices.
3409 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3410 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3411 mb->init_req(TypeFunc::Control, control());
3412 mb->init_req(TypeFunc::Memory, reset_memory());
3413 Node* membar = _gvn.transform(mb);
3441 }
3442 Node* membar = _gvn.transform(mb);
3443 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3444 if (alias_idx == Compile::AliasIdxBot) {
3445 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3446 } else {
3447 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3448 }
3449 return membar;
3450 }
3451
3452 //------------------------------shared_lock------------------------------------
3453 // Emit locking code.
3454 FastLockNode* GraphKit::shared_lock(Node* obj) {
3455 // bci is either a monitorenter bc or InvocationEntryBci
3456 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3457 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3458
3459 if( !GenerateSynchronizationCode )
3460 return nullptr; // Not locking things?
3461 if (stopped()) // Dead monitor?
3462 return nullptr;
3463
3464 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3465
3466 // Box the stack location
3467 Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3468 Node* mem = reset_memory();
3469
3470 FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3471
3472 // Create the rtm counters for this fast lock if needed.
3473 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3474
3475 // Add monitor to debug info for the slow path. If we block inside the
3476 // slow path and de-opt, we need the monitor hanging around
3477 map()->push_monitor( flock );
3478
3479 const TypeFunc *tf = LockNode::lock_type();
3480 LockNode *lock = new LockNode(C, tf);
3509 }
3510 #endif
3511
3512 return flock;
3513 }
3514
3515
3516 //------------------------------shared_unlock----------------------------------
3517 // Emit unlocking code.
3518 void GraphKit::shared_unlock(Node* box, Node* obj) {
3519 // bci is either a monitorexit bc or InvocationEntryBci
3520 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3521 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3522
3523 if( !GenerateSynchronizationCode )
3524 return;
3525 if (stopped()) { // Dead monitor?
3526 map()->pop_monitor(); // Kill monitor from debug info
3527 return;
3528 }
3529
3530 // Memory barrier to avoid floating things down past the locked region
3531 insert_mem_bar(Op_MemBarReleaseLock);
3532
3533 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3534 UnlockNode *unlock = new UnlockNode(C, tf);
3535 #ifdef ASSERT
3536 unlock->set_dbg_jvms(sync_jvms());
3537 #endif
3538 uint raw_idx = Compile::AliasIdxRaw;
3539 unlock->init_req( TypeFunc::Control, control() );
3540 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3541 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3542 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3543 unlock->init_req( TypeFunc::ReturnAdr, top() );
3544
3545 unlock->init_req(TypeFunc::Parms + 0, obj);
3546 unlock->init_req(TypeFunc::Parms + 1, box);
3547 unlock = _gvn.transform(unlock)->as_Unlock();
3548
3549 Node* mem = reset_memory();
3550
3551 // unlock has no side-effects and sets few values
3552 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3553
3554 // Kill monitor from debug info
3555 map()->pop_monitor( );
3556 }
3557
3558 //-------------------------------get_layout_helper-----------------------------
3559 // If the given klass is a constant or known to be an array,
3560 // fetch the constant layout helper value into constant_value
3561 // and return null. Otherwise, load the non-constant
3562 // layout helper value, and return the node which represents it.
3563 // This two-faced routine is useful because allocation sites
3564 // almost always feature constant types.
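// Typical caller pattern (a sketch; cf. new_instance/new_array below):
//   jint lh_con = Klass::_lh_neutral_value;
//   Node* lh = get_layout_helper(klass_node, lh_con);
//   if (lh == nullptr) { /* lh_con holds the constant layout helper */ }
//   else               { /* emit runtime tests against the loaded lh node */ }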
3565 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3566 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3567 if (!StressReflectiveCode && klass_t != nullptr) {
3568 bool xklass = klass_t->klass_is_exact();
3569 if (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM)) {
3570 jint lhelper;
3571 if (klass_t->isa_aryklassptr()) {
3572 BasicType elem = klass_t->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
3573 if (is_reference_type(elem, true)) {
3574 elem = T_OBJECT;
3575 }
3576 lhelper = Klass::array_layout_helper(elem);
3577 } else {
3578 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
3579 }
3580 if (lhelper != Klass::_lh_neutral_value) {
3581 constant_value = lhelper;
3582 return (Node*) nullptr;
3583 }
3584 }
3585 }
3586 constant_value = Klass::_lh_neutral_value; // put in a known value
3587 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3588 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3589 }
3590
3591 // We just put in an allocate/initialize with a big raw-memory effect.
3592 // Hook selected additional alias categories on the initialization.
3593 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3594 MergeMemNode* init_in_merge,
3595 Node* init_out_raw) {
3596 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3597 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3598
3599 Node* prevmem = kit.memory(alias_idx);
3600 init_in_merge->set_memory_at(alias_idx, prevmem);
3601 kit.set_memory(init_out_raw, alias_idx);
3602 }
3603
3604 //---------------------------set_output_for_allocation-------------------------
3605 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3606 const TypeOopPtr* oop_type,
3607 bool deoptimize_on_exception) {
3608 int rawidx = Compile::AliasIdxRaw;
3609 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3610 add_safepoint_edges(alloc);
3611 Node* allocx = _gvn.transform(alloc);
3612 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3613 // create memory projection for i_o
3614 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3615 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3616
3617 // create a memory projection as for the normal control path
3618 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3619 set_memory(malloc, rawidx);
3620
3621 // a normal slow-call doesn't change i_o, but an allocation does;
3622 // we create a separate i_o projection for the normal control path
3623 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3624 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3625
3626 // put in an initialization barrier
3627 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3628 rawoop)->as_Initialize();
3629 assert(alloc->initialization() == init, "2-way macro link must work");
3630 assert(init ->allocation() == alloc, "2-way macro link must work");
3631 {
3632 // Extract memory strands which may participate in the new object's
3633 // initialization, and source them from the new InitializeNode.
3634 // This will allow us to observe initializations when they occur,
3635 // and link them properly (as a group) to the InitializeNode.
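// (E.g., for a hypothetical 'new Point()' with fields x and y, the x and y
// alias slices are rerouted below through minit_in/minit_out so their
// initializing stores are grouped under this InitializeNode.)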
3636 assert(init->in(InitializeNode::Memory) == malloc, "");
3637 MergeMemNode* minit_in = MergeMemNode::make(malloc);
3638 init->set_req(InitializeNode::Memory, minit_in);
3639 record_for_igvn(minit_in); // fold it up later, if possible
3640 Node* minit_out = memory(rawidx);
3641 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3642 // Add an edge in the MergeMem for the header fields so an access
3643 // to one of those has correct memory state
3644 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3645 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3646 if (oop_type->isa_aryptr()) {
3647 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3648 int elemidx = C->get_alias_index(telemref);
3649 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3650 } else if (oop_type->isa_instptr()) {
3651 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
3652 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3653 ciField* field = ik->nonstatic_field_at(i);
3654 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
3655 continue; // do not bother to track really large numbers of fields
3656 // Find (or create) the alias category for this field:
3657 int fieldidx = C->alias_type(field)->index();
3658 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3659 }
3660 }
3661 }
3662
3663 // Cast raw oop to the real thing...
3664 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3665 javaoop = _gvn.transform(javaoop);
3666 C->set_recent_alloc(control(), javaoop);
3667 assert(just_allocated_object(control()) == javaoop, "just allocated");
3668
3669 #ifdef ASSERT
3670 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
3681 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3682 }
3683 }
3684 #endif //ASSERT
3685
3686 return javaoop;
3687 }
3688
3689 //---------------------------new_instance--------------------------------------
3690 // This routine takes a klass_node which may be constant (for a static type)
3691 // or may be non-constant (for reflective code). It will work equally well
3692 // for either, and the graph will fold nicely if the optimizer later reduces
3693 // the type to a constant.
3694 // The optional arguments are for specialized use by intrinsics:
3695 // - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
3696 // - If 'return_size_val' is not null, report the total object size to the caller.
3697 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3698 Node* GraphKit::new_instance(Node* klass_node,
3699 Node* extra_slow_test,
3700 Node* *return_size_val,
3701 bool deoptimize_on_exception) {
3702 // Compute size in doublewords
3703 // The size is always an integral number of doublewords, represented
3704 // as a positive bytewise size stored in the klass's layout_helper.
3705 // The layout_helper also encodes (in a low bit) the need for a slow path.
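// (Illustrative: for instance klasses the helper is essentially the object
// size in bytes, with Klass::_lh_instance_slow_path_bit set in the low bit
// when, e.g., a finalizer forces the slow path; sizes are rounded to whole
// words, so the low bit is otherwise free.)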
3706 jint layout_con = Klass::_lh_neutral_value;
3707 Node* layout_val = get_layout_helper(klass_node, layout_con);
3708 int layout_is_con = (layout_val == nullptr);
3709
3710 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
3711 // Generate the initial go-slow test. It's either ALWAYS (return a
3712 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
3713 // case) a computed value derived from the layout_helper.
3714 Node* initial_slow_test = nullptr;
3715 if (layout_is_con) {
3716 assert(!StressReflectiveCode, "stress mode does not use these paths");
3717 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3718 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3719 } else { // reflective case
3720 // This reflective path is used by Unsafe.allocateInstance.
3721 // (It may be stress-tested by specifying StressReflectiveCode.)
3722 // Basically, we want to get into the VM if there's an illegal argument.
3723 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3724 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3725 if (extra_slow_test != intcon(0)) {
3726 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3727 }
3728 // (Macro-expander will further convert this to a Bool, if necessary.)
3739
3740 // Clear the low bits to extract layout_helper_size_in_bytes:
3741 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3742 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3743 size = _gvn.transform( new AndXNode(size, mask) );
3744 }
3745 if (return_size_val != nullptr) {
3746 (*return_size_val) = size;
3747 }
3748
3749 // This is a precise notnull oop of the klass.
3750 // (Actually, it need not be precise if this is a reflective allocation.)
3751 // It's what we cast the result to.
3752 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3753 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
3754 const TypeOopPtr* oop_type = tklass->as_instance_type();
3755
3756 // Now generate allocation code
3757
3758 // The entire memory state is needed for the slow path of the allocation
3759 // since GC and deoptimization can happen.
3760 Node *mem = reset_memory();
3761 set_all_memory(mem); // Create new memory state
3762
3763 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3764 control(), mem, i_o(),
3765 size, klass_node,
3766 initial_slow_test);
3767
3768 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3769 }
3770
3771 //-------------------------------new_array-------------------------------------
3772 // helper for both newarray and anewarray
3773 // The 'length' parameter is (obviously) the length of the array.
3774 // The optional arguments are for specialized use by intrinsics:
3775 // - If 'return_size_val' is not null, report the non-padded array size (sum of
3776 // header size and array body) to the caller.
3777 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3778 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3779 Node* length, // number of array elements
3780 int nargs, // number of arguments to push back for uncommon trap
3781 Node* *return_size_val,
3782 bool deoptimize_on_exception) {
3783 jint layout_con = Klass::_lh_neutral_value;
3784 Node* layout_val = get_layout_helper(klass_node, layout_con);
3785 int layout_is_con = (layout_val == nullptr);
3786
3787 if (!layout_is_con && !StressReflectiveCode &&
3788 !too_many_traps(Deoptimization::Reason_class_check)) {
3789 // This is a reflective array creation site.
3790 // Optimistically assume that it is a subtype of Object[],
3791 // so that we can fold up all the address arithmetic.
3792 layout_con = Klass::array_layout_helper(T_OBJECT);
3793 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3794 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3795 { BuildCutout unless(this, bol_lh, PROB_MAX);
3796 inc_sp(nargs);
3797 uncommon_trap(Deoptimization::Reason_class_check,
3798 Deoptimization::Action_maybe_recompile);
3799 }
3800 layout_val = nullptr;
3801 layout_is_con = true;
3802 }
3803
3804 // Generate the initial go-slow test. Make sure we do not overflow
3805 // if length is huge (near 2Gig) or negative! We do not need
3806 // exact double-words here, just a close approximation of needed
3807 // double-words. We can't add any offset or rounding bits, lest we
3808 // take a size -1 of bytes and make it positive. Use an unsigned
3809 // compare, so negative sizes look hugely positive.
3810 int fast_size_limit = FastAllocateSizeLimit;
3811 if (layout_is_con) {
3812 assert(!StressReflectiveCode, "stress mode does not use these paths");
3813 // Increase the size limit if we have exact knowledge of array type.
3814 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3815 fast_size_limit <<= (LogBytesPerLong - log2_esize);
3816 }
3817
3818 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3819 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3820
3821 // --- Size Computation ---
3822 // array_size = round_to_heap(array_header + (length << elem_shift));
3823 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
3824 // and align_to(x, y) == ((x + y-1) & ~(y-1))
3825 // The rounding mask is strength-reduced, if possible.
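// (Worked example, assuming a 16-byte array header and 8-byte alignment:
// an int[5] gives round_to_heap(16 + (5 << 2)) = align_to(36, 8) = 40 bytes.)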
3826 int round_mask = MinObjAlignmentInBytes - 1;
3827 Node* header_size = nullptr;
3828 // (T_BYTE has the weakest alignment and size restrictions...)
3829 if (layout_is_con) {
3830 int hsize = Klass::layout_helper_header_size(layout_con);
3831 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3832 if ((round_mask & ~right_n_bits(eshift)) == 0)
3833 round_mask = 0; // strength-reduce it if it goes away completely
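// (E.g., for a long[] with eshift == 3 and 8-byte alignment,
// (7 & ~right_n_bits(3)) == 0: 8-byte elements keep the size self-aligned,
// so round_mask is cleared and no rounding is needed.)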
3834 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3835 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3836 assert(header_size_min <= hsize, "generic minimum is smallest");
3837 header_size = intcon(hsize);
3838 } else {
3839 Node* hss = intcon(Klass::_lh_header_size_shift);
3840 Node* hsm = intcon(Klass::_lh_header_size_mask);
3841 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
3842 header_size = _gvn.transform(new AndINode(header_size, hsm));
3843 }
3844
3845 Node* elem_shift = nullptr;
3846 if (layout_is_con) {
3847 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3848 if (eshift != 0)
3849 elem_shift = intcon(eshift);
3850 } else {
3851 // There is no need to mask or shift this value.
3852 // The semantics of LShiftINode include an implicit mask to 0x1F.
3853 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3854 elem_shift = layout_val;
3901 }
3902 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
3903
3904 if (return_size_val != nullptr) {
3905 // This is the size
3906 (*return_size_val) = non_rounded_size;
3907 }
3908
3909 Node* size = non_rounded_size;
3910 if (round_mask != 0) {
3911 Node* mask1 = MakeConX(round_mask);
3912 size = _gvn.transform(new AddXNode(size, mask1));
3913 Node* mask2 = MakeConX(~round_mask);
3914 size = _gvn.transform(new AndXNode(size, mask2));
3915 }
3916 // else if round_mask == 0, the size computation is self-rounding
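// (E.g., with round_mask == 7 a non_rounded_size of 36 becomes
// (36 + 7) & ~7 == 40.)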
3917
3918 // Now generate allocation code
3919
3920 // The entire memory state is needed for the slow path of the allocation
3921 // since GC and deoptimization can happen.
3922 Node *mem = reset_memory();
3923 set_all_memory(mem); // Create new memory state
3924
3925 if (initial_slow_test->is_Bool()) {
3926 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3927 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3928 }
3929
3930 const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3931 Node* valid_length_test = _gvn.intcon(1);
3932 if (ary_type->isa_aryptr()) {
3933 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
3934 jint max = TypeAryPtr::max_array_length(bt);
3935 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
3936 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
3937 }
3938
3939 // Create the AllocateArrayNode and its result projections
3940 AllocateArrayNode* alloc
3941 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3942 control(), mem, i_o(),
3943 size, klass_node,
3944 initial_slow_test,
3945 length, valid_length_test);
3946
3947 // Cast to correct type. Note that the klass_node may be constant or not,
3948 // and in the latter case the actual array type will be inexact also.
3949 // (This happens via a non-constant argument to inline_native_newArray.)
3950 // In any case, the value of klass_node provides the desired array type.
3951 const TypeInt* length_type = _gvn.find_int_type(length);
3952 if (ary_type->isa_aryptr() && length_type != nullptr) {
3953 // Try to get a better type than POS for the size
3954 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3955 }
3956
3957 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
3958
3959 array_ideal_length(alloc, ary_type, true);
3960 return javaoop;
3961 }
3962
3963 // The following "Ideal_foo" functions are placed here because they recognize
3964 // the graph shapes created by the functions immediately above.
3965
3966 //---------------------------Ideal_allocation----------------------------------
4073 set_all_memory(ideal.merged_memory());
4074 set_i_o(ideal.i_o());
4075 set_control(ideal.ctrl());
4076 }
4077
4078 void GraphKit::final_sync(IdealKit& ideal) {
4079 // Final sync IdealKit and graphKit.
4080 sync_kit(ideal);
4081 }
4082
4083 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4084 Node* len = load_array_length(load_String_value(str, set_ctrl));
4085 Node* coder = load_String_coder(str, set_ctrl);
4086 // Divide length by 2 if coder is UTF16
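// (coder is CODER_LATIN1 (0) or CODER_UTF16 (1), so the shift leaves a
// latin-1 byte length unchanged and halves a UTF-16 one to a char count.)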
4087 return _gvn.transform(new RShiftINode(len, coder));
4088 }
4089
4090 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4091 int value_offset = java_lang_String::value_offset();
4092 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4093 false, nullptr, 0);
4094 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4095 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4096 TypeAry::make(TypeInt::BYTE, TypeInt::POS),
4097 ciTypeArrayKlass::make(T_BYTE), true, 0);
4098 Node* p = basic_plus_adr(str, str, value_offset);
4099 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4100 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4101 return load;
4102 }
4103
4104 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4105 if (!CompactStrings) {
4106 return intcon(java_lang_String::CODER_UTF16);
4107 }
4108 int coder_offset = java_lang_String::coder_offset();
4109 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4110 false, nullptr, 0);
4111 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4112
4113 Node* p = basic_plus_adr(str, str, coder_offset);
4114 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4115 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4116 return load;
4117 }
4118
4119 void GraphKit::store_String_value(Node* str, Node* value) {
4120 int value_offset = java_lang_String::value_offset();
4121 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4122 false, nullptr, 0);
4123 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4124
4125 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4126 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4127 }
4128
4129 void GraphKit::store_String_coder(Node* str, Node* value) {
4130 int coder_offset = java_lang_String::coder_offset();
4131 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4132 false, nullptr, 0);
4133 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4134
4135 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4136 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4137 }
4138
4139 // Capture src and dst memory state with a MergeMemNode
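// (E.g., for a copy between two distinct alias slices, the returned MergeMem
// carries exactly those two slices, so the copy is ordered against both
// without being pinned to the entire memory state.)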
4140 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4141 if (src_type == dst_type) {
4142 // Types are equal, we don't need a MergeMemNode
4143 return memory(src_type);
4144 }
4145 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4146 record_for_igvn(merge); // fold it up later, if possible
4147 int src_idx = C->get_alias_index(src_type);
4148 int dst_idx = C->get_alias_index(dst_type);
4149 merge->set_memory_at(src_idx, memory(src_idx));
4150 merge->set_memory_at(dst_idx, memory(dst_idx));
4151 return merge;
4152 }
4225 i_char->init_req(2, AddI(i_char, intcon(2)));
4226
4227 set_control(IfFalse(iff));
4228 set_memory(st, TypeAryPtr::BYTES);
4229 }
4230
4231 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4232 if (!field->is_constant()) {
4233 return nullptr; // Field not marked as constant.
4234 }
4235 ciInstance* holder = nullptr;
4236 if (!field->is_static()) {
4237 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4238 if (const_oop != nullptr && const_oop->is_instance()) {
4239 holder = const_oop->as_instance();
4240 }
4241 }
4242 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4243 /*is_unsigned_load=*/false);
4244 if (con_type != nullptr) {
4245 return makecon(con_type);
4246 }
4247 return nullptr;
4248 }
24
25 #include "precompiled.hpp"
26 #include "ci/ciFlatArrayKlass.hpp"
27 #include "ci/ciInlineKlass.hpp"
28 #include "ci/ciUtilities.hpp"
29 #include "classfile/javaClasses.hpp"
30 #include "ci/ciObjArray.hpp"
31 #include "asm/register.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/c2/barrierSetC2.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "opto/addnode.hpp"
38 #include "opto/castnode.hpp"
39 #include "opto/convertnode.hpp"
40 #include "opto/graphKit.hpp"
41 #include "opto/idealKit.hpp"
42 #include "opto/inlinetypenode.hpp"
43 #include "opto/intrinsicnode.hpp"
44 #include "opto/locknode.hpp"
45 #include "opto/machnode.hpp"
46 #include "opto/narrowptrnode.hpp"
47 #include "opto/opaquenode.hpp"
48 #include "opto/parse.hpp"
49 #include "opto/rootnode.hpp"
50 #include "opto/runtime.hpp"
51 #include "opto/subtypenode.hpp"
52 #include "runtime/deoptimization.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "utilities/bitMap.inline.hpp"
55 #include "utilities/powerOfTwo.hpp"
56 #include "utilities/growableArray.hpp"
57
58 //----------------------------GraphKit-----------------------------------------
59 // Main utility constructor.
60 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
61 : Phase(Phase::Parser),
62 _env(C->env()),
63 _gvn((gvn != nullptr) ? *gvn : *C->initial_gvn()),
64 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
65 {
66 assert(gvn == nullptr || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
67 _exceptions = jvms->map()->next_exception();
68 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
69 set_jvms(jvms);
70 #ifdef ASSERT
71 if (_gvn.is_IterGVN() != nullptr) {
72 assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
73 // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
74 _worklist_size = _gvn.C->igvn_worklist()->size();
75 }
76 #endif
77 }
78
79 // Private constructor for parser.
80 GraphKit::GraphKit()
81 : Phase(Phase::Parser),
82 _env(C->env()),
83 _gvn(*C->initial_gvn()),
84 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
85 {
86 _exceptions = nullptr;
87 set_map(nullptr);
88 debug_only(_sp = -99);
89 debug_only(set_bci(-99));
90 }
91
92
93
94 //---------------------------clean_stack---------------------------------------
95 // Clear away rubbish from the stack area of the JVM state.
96 // This destroys any arguments that may be waiting on the stack.
852 if (PrintMiscellaneous && (Verbose || WizardMode)) {
853 tty->print_cr("Zombie local %d: ", local);
854 jvms->dump();
855 }
856 return false;
857 }
858 }
859 }
860 return true;
861 }
862
863 #endif //ASSERT
864
865 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
866 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
867 ciMethod* cur_method = jvms->method();
868 int cur_bci = jvms->bci();
869 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
870 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
871 return Interpreter::bytecode_should_reexecute(code) ||
872 (is_anewarray && (code == Bytecodes::_multianewarray));
873 // Reexecute the _multianewarray bytecode, which was replaced with a
874 // sequence of [a]newarray. See Parse::do_multianewarray().
875 //
876 // Note: the interpreter should not have this set, since the optimization
877 // is limited by dimensions and guarded by a flag; in some cases
878 // multianewarray() runtime calls will still be generated and then
879 // the bytecode should not be re-executed (the stack will not be reset).
880 } else {
881 return false;
882 }
883 }
884
885 // Helper function for adding JVMState and debug information to node
886 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
887 // Add the safepoint edges to the call (or other safepoint).
888
889 // Make sure dead locals are set to top. This
890 // should help register allocation time and cut down on the size
891 // of the deoptimization information.
892 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
968 uint p = debug_start; // walks forward in [debug_start, debug_end)
969 uint j, k, l;
970 SafePointNode* in_map = in_jvms->map();
971 out_jvms->set_map(call);
972
973 if (can_prune_locals) {
974 assert(in_jvms->method() == out_jvms->method(), "sanity");
975 // If the current throw can reach an exception handler in this JVMS,
976 // then we must keep everything live that can reach that handler.
977 // As a quick and dirty approximation, we look for any handlers at all.
978 if (in_jvms->method()->has_exception_handlers()) {
979 can_prune_locals = false;
980 }
981 }
982
983 // Add the Locals
984 k = in_jvms->locoff();
985 l = in_jvms->loc_size();
986 out_jvms->set_locoff(p);
987 if (!can_prune_locals) {
988 for (j = 0; j < l; j++) {
989 call->set_req(p++, in_map->in(k+j));
990 Node* local = in_map->in(k+j);
991 // TODO 8325106
992 /*
993 if (false && local->is_InlineType() && local->isa_InlineType()->is_larval()) {
994 tty->print_cr("LARVAL FOUND in LOCAL");
995 in_map->dump(0);
996 local->dump(0);
997 }
998 */
999 }
1000 } else {
1001 p += l; // already set to top above by add_req_batch
1002 }
1003
1004 // Add the Expression Stack
1005 k = in_jvms->stkoff();
1006 l = in_jvms->sp();
1007 out_jvms->set_stkoff(p);
1008 if (!can_prune_locals) {
1009 for (j = 0; j < l; j++) {
1010 call->set_req(p++, in_map->in(k+j));
1011 Node* local = in_map->in(k+j);
1012 // TODO 8325106 check if there's a larval on stack in the caller state that has been written in the callee state and update it accordingly
1013 /*
1014 if (false && local->is_InlineType() && local->isa_InlineType()->is_larval()) {
1015 tty->print_cr("LARVAL FOUND on STACK");
1016 in_map->dump(0);
1017 local->dump(0);
1018 map()->replaced_nodes().dump(tty);
1019 map()->replaced_nodes().apply(call, 0);
1020 tty->print_cr("");
1021 }
1022 */
1023 }
1024 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
1025 // Split the stack into {S0, S1}: the lower s0 slots are pruned (left as top), the upper s1 slots are kept.
1026 uint s1 = stack_slots_not_pruned;
1027 stack_slots_not_pruned = 0; // for next iteration
1028 if (s1 > l) s1 = l;
1029 uint s0 = l - s1;
1030 p += s0; // skip the tops preinstalled by add_req_batch
1031 for (j = s0; j < l; j++)
1032 call->set_req(p++, in_map->in(k+j));
1033 } else {
1034 p += l; // already set to top above by add_req_batch
1035 }
1036
1037 // Add the Monitors
1038 k = in_jvms->monoff();
1039 l = in_jvms->mon_size();
1040 out_jvms->set_monoff(p);
1041 for (j = 0; j < l; j++)
1042 call->set_req(p++, in_map->in(k+j));
1043
1217 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1218 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1219 return _gvn.transform( new AndLNode(conv, mask) );
1220 }
1221
1222 Node* GraphKit::ConvL2I(Node* offset) {
1223 // short-circuit a common case
1224 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1225 if (offset_con != (jlong)Type::OffsetBot) {
1226 return intcon((int) offset_con);
1227 }
1228 return _gvn.transform( new ConvL2INode(offset));
1229 }
1230
1231 //-------------------------load_object_klass-----------------------------------
1232 Node* GraphKit::load_object_klass(Node* obj) {
1233 // Special-case a fresh allocation to avoid building nodes:
1234 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1235 if (akls != nullptr) return akls;
1236 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1237 return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
1238 }
1239
1240 //-------------------------load_array_length-----------------------------------
1241 Node* GraphKit::load_array_length(Node* array) {
1242 // Special-case a fresh allocation to avoid building nodes:
1243 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1244 Node *alen;
1245 if (alloc == nullptr) {
1246 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1247 alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1248 } else {
1249 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1250 }
1251 return alen;
1252 }
1253
1254 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1255 const TypeOopPtr* oop_type,
1256 bool replace_length_in_map) {
1257 Node* length = alloc->Ideal_length();
1266 replace_in_map(length, ccast);
1267 }
1268 return ccast;
1269 }
1270 }
1271 return length;
1272 }
1273
1274 //------------------------------do_null_check----------------------------------
1275 // Helper function to do a null pointer check. Returned value is
1276 // the incoming address with null cast away. You are allowed to use the
1277 // not-null value only if you are control dependent on the test.
1278 #ifndef PRODUCT
1279 extern uint explicit_null_checks_inserted,
1280 explicit_null_checks_elided;
1281 #endif
1282 Node* GraphKit::null_check_common(Node* value, BasicType type,
1283 // optional arguments for variations:
1284 bool assert_null,
1285 Node* *null_control,
1286 bool speculative,
1287 bool is_init_check) {
1288 assert(!assert_null || null_control == nullptr, "not both at once");
1289 if (stopped()) return top();
1290 NOT_PRODUCT(explicit_null_checks_inserted++);
1291
1292 if (value->is_InlineType()) {
1293 // Null checking a scalarized but nullable inline type. Check the IsInit
1294 // input instead of the oop input to avoid keeping buffer allocations alive.
1295 InlineTypeNode* vtptr = value->as_InlineType();
1296 while (vtptr->get_oop()->is_InlineType()) {
1297 vtptr = vtptr->get_oop()->as_InlineType();
1298 }
1299 null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1300 if (stopped()) {
1301 return top();
1302 }
1303 if (assert_null) {
1304 // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1305 // vtptr = InlineTypeNode::make_null(_gvn, vtptr->type()->inline_klass());
1306 // replace_in_map(value, vtptr);
1307 // return vtptr;
1308 return null();
1309 }
1310 bool do_replace_in_map = (null_control == nullptr || (*null_control) == top());
1311 return cast_not_null(value, do_replace_in_map);
1312 }
1313
1314 // Construct null check
1315 Node *chk = nullptr;
1316 switch(type) {
1317 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1318 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1319 case T_ARRAY : // fall through
1320 type = T_OBJECT; // simplify further tests
1321 case T_OBJECT : {
1322 const Type *t = _gvn.type( value );
1323
1324 const TypeOopPtr* tp = t->isa_oopptr();
1325 if (tp != nullptr && !tp->is_loaded()
1326 // Only for do_null_check, not any of its siblings:
1327 && !assert_null && null_control == nullptr) {
1328 // Usually, any field access or invocation on an unloaded oop type
1329 // will simply fail to link, since the statically linked class is
1330 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1331 // the static class is loaded but the sharper oop type is not.
1332 // Rather than checking for this obscure case in lots of places,
1333 // we simply observe that a null check on an unloaded class
1397 }
1398 Node *oldcontrol = control();
1399 set_control(cfg);
1400 Node *res = cast_not_null(value);
1401 set_control(oldcontrol);
1402 NOT_PRODUCT(explicit_null_checks_elided++);
1403 return res;
1404 }
1405 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1406 if (cfg == nullptr) break; // Quit at region nodes
1407 depth++;
1408 }
1409 }
1410
1411 //-----------
1412 // Branch to failure if null
1413 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1414 Deoptimization::DeoptReason reason;
1415 if (assert_null) {
1416 reason = Deoptimization::reason_null_assert(speculative);
1417 } else if (type == T_OBJECT || is_init_check) {
1418 reason = Deoptimization::reason_null_check(speculative);
1419 } else {
1420 reason = Deoptimization::Reason_div0_check;
1421 }
1422 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1423 // ciMethodData::has_trap_at will return a conservative -1 if any
1424 // must-be-null assertion has failed. This could cause performance
1425 // problems for a method after its first do_null_assert failure.
1426 // Consider using 'Reason_class_check' instead?
1427
1428 // To cause an implicit null check, we set the not-null probability
1429 // to the maximum (PROB_MAX). For an explicit check the probability
1430 // is set to a smaller value.
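// (With ok_prob == PROB_MAX the backend may later fold the test into the
// first dependent memory access and rely on the hardware trap instead.)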
1431 if (null_control != nullptr || too_many_traps(reason)) {
1432 // probability is less likely
1433 ok_prob = PROB_LIKELY_MAG(3);
1434 } else if (!assert_null &&
1435 (ImplicitNullCheckThreshold > 0) &&
1436 method() != nullptr &&
1437 (method()->method_data()->trap_count(reason)
1471 }
1472
1473 if (assert_null) {
1474 // Cast obj to null on this path.
1475 replace_in_map(value, zerocon(type));
1476 return zerocon(type);
1477 }
1478
1479 // Cast obj to not-null on this path, if there is no null_control.
1480 // (If there is a null_control, a non-null value may come back to haunt us.)
1481 if (type == T_OBJECT) {
1482 Node* cast = cast_not_null(value, false);
1483 if (null_control == nullptr || (*null_control) == top())
1484 replace_in_map(value, cast);
1485 value = cast;
1486 }
1487
1488 return value;
1489 }
1490
1491 //------------------------------cast_not_null----------------------------------
1492 // Cast obj to not-null on this path
1493 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1494 if (obj->is_InlineType()) {
1495 Node* vt = obj->clone();
1496 vt->as_InlineType()->set_is_init(_gvn);
1497 vt = _gvn.transform(vt);
1498 if (do_replace_in_map) {
1499 replace_in_map(obj, vt);
1500 }
1501 return vt;
1502 }
1503 const Type *t = _gvn.type(obj);
1504 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1505 // Object is already not-null?
1506 if( t == t_not_null ) return obj;
1507
1508 Node *cast = new CastPPNode(obj,t_not_null);
1509 cast->init_req(0, control());
1510 cast = _gvn.transform( cast );
1511
1512 // Scan for instances of 'obj' in the current JVM mapping.
1513 // These instances are known to be not-null after the test.
1514 if (do_replace_in_map)
1515 replace_in_map(obj, cast);
1516
1517 return cast; // Return casted value
1518 }
1519
1520 // Sometimes in intrinsics, we implicitly know an object is not null
1521 // (there's no actual null check) so we can cast it to not null. In
1522 // the course of optimizations, the input to the cast can become null.
1609 // These are layered on top of the factory methods in LoadNode and StoreNode,
1610 // and integrate with the parser's memory state and _gvn engine.
1611 //
1612
1613 // factory methods in "int adr_idx"
1614 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1615 int adr_idx,
1616 MemNode::MemOrd mo,
1617 LoadNode::ControlDependency control_dependency,
1618 bool require_atomic_access,
1619 bool unaligned,
1620 bool mismatched,
1621 bool unsafe,
1622 uint8_t barrier_data) {
1623 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1624 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1625 debug_only(adr_type = C->get_adr_type(adr_idx));
1626 Node* mem = memory(adr_idx);
1627 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1628 ld = _gvn.transform(ld);
1629
1630 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1631 // Improve graph before escape analysis and boxing elimination.
1632 record_for_igvn(ld);
1633 }
1634 return ld;
1635 }
1636
1637 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1638 int adr_idx,
1639 MemNode::MemOrd mo,
1640 bool require_atomic_access,
1641 bool unaligned,
1642 bool mismatched,
1643 bool unsafe,
1644 int barrier_data) {
1645 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1646 const TypePtr* adr_type = nullptr;
1647 debug_only(adr_type = C->get_adr_type(adr_idx));
1648 Node *mem = memory(adr_idx);
1649 Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
1656 if (unsafe) {
1657 st->as_Store()->set_unsafe_access();
1658 }
1659 st->as_Store()->set_barrier_data(barrier_data);
1660 st = _gvn.transform(st);
1661 set_memory(st, adr_idx);
1662 // Back-to-back stores can only remove intermediate store with DU info
1663 // so push on worklist for optimizer.
1664 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1665 record_for_igvn(st);
1666
1667 return st;
1668 }
1669
1670 Node* GraphKit::access_store_at(Node* obj,
1671 Node* adr,
1672 const TypePtr* adr_type,
1673 Node* val,
1674 const Type* val_type,
1675 BasicType bt,
1676 DecoratorSet decorators,
1677 bool safe_for_replace) {
1678 // Transformation of a value which could be a null pointer (CastPP #null)
1679 // could be delayed during Parse (for example, in adjust_map_after_if()).
1680 // Execute transformation here to avoid barrier generation in such case.
1681 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1682 val = _gvn.makecon(TypePtr::NULL_PTR);
1683 }
1684
1685 if (stopped()) {
1686 return top(); // Dead path ?
1687 }
1688
1689 assert(val != nullptr, "not dead path");
1690 if (val->is_InlineType()) {
1691 // Store to non-flat field. Buffer the inline type and make sure
1692 // the store is re-executed if the allocation triggers deoptimization.
1693 PreserveReexecuteState preexecs(this);
1694 jvms()->set_should_reexecute(true);
1695 val = val->as_InlineType()->buffer(this, safe_for_replace);
1696 }
1697
1698 C2AccessValuePtr addr(adr, adr_type);
1699 C2AccessValue value(val, val_type);
1700 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1701 if (access.is_raw()) {
1702 return _barrier_set->BarrierSetC2::store_at(access, value);
1703 } else {
1704 return _barrier_set->store_at(access, value);
1705 }
1706 }
1707
1708 Node* GraphKit::access_load_at(Node* obj, // containing obj
1709 Node* adr, // actual address to store val at
1710 const TypePtr* adr_type,
1711 const Type* val_type,
1712 BasicType bt,
1713 DecoratorSet decorators,
1714 Node* ctl) {
1715 if (stopped()) {
1716 return top(); // Dead path ?
1717 }
1718
1719 C2AccessValuePtr addr(adr, adr_type);
1720 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1721 if (access.is_raw()) {
1722 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1723 } else {
1724 return _barrier_set->load_at(access, val_type);
1725 }
1726 }
1727
1728 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1729 const Type* val_type,
1730 BasicType bt,
1731 DecoratorSet decorators) {
1732 if (stopped()) {
1733 return top(); // Dead path ?
1734 }
1735
1736 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1737 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1738 if (access.is_raw()) {
1739 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1740 } else {
1805 Node* new_val,
1806 const Type* value_type,
1807 BasicType bt,
1808 DecoratorSet decorators) {
1809 C2AccessValuePtr addr(adr, adr_type);
1810 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1811 if (access.is_raw()) {
1812 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1813 } else {
1814 return _barrier_set->atomic_add_at(access, new_val, value_type);
1815 }
1816 }
1817
1818 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1819 return _barrier_set->clone(this, src, dst, size, is_array);
1820 }
1821
1822 //-------------------------array_element_address-------------------------
1823 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1824 const TypeInt* sizetype, Node* ctrl) {
1825 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
1826 uint shift = arytype->is_flat() ? arytype->flat_log_elem_size() : exact_log2(type2aelembytes(elembt));
1827 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1828
1829 // short-circuit a common case (saves lots of confusing waste motion)
1830 jint idx_con = find_int_con(idx, -1);
1831 if (idx_con >= 0) {
1832 intptr_t offset = header + ((intptr_t)idx_con << shift);
1833 return basic_plus_adr(ary, offset);
1834 }
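// (Illustrative, assuming a 16-byte header: a byte[] access with
// idx_con == 3 yields header + (3 << 0) == 19.)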
1835
1836 // must be correct type for alignment purposes
1837 Node* base = basic_plus_adr(ary, header);
1838 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1839 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1840 return basic_plus_adr(ary, base, scale);
1841 }
1842
1843 //-------------------------load_array_element-------------------------
1844 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1845 const Type* elemtype = arytype->elem();
1846 BasicType elembt = elemtype->array_element_basic_type();
1847 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1848 if (elembt == T_NARROWOOP) {
1849 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1850 }
1851 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1852 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1853 return ld;
1854 }
1855
1856 //-------------------------set_arguments_for_java_call-------------------------
1857 // Arguments (pre-popped from the stack) are taken from the JVMS.
1858 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1859 PreserveReexecuteState preexecs(this);
1860 if (EnableValhalla) {
1861 // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
1862 // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
1863 jvms()->set_should_reexecute(true);
1864 int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1865 inc_sp(arg_size);
1866 }
1867 // Add the call arguments
1868 const TypeTuple* domain = call->tf()->domain_sig();
1869 uint nargs = domain->cnt();
1870 int arg_num = 0;
1871 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1872 Node* arg = argument(i-TypeFunc::Parms);
1873 const Type* t = domain->field_at(i);
1874 // TODO 8284443 A static call to a mismatched method should still be scalarized
1875 if (t->is_inlinetypeptr() && !call->method()->get_Method()->mismatch() && call->method()->is_scalarized_arg(arg_num)) {
1876 // We don't pass inline type arguments by reference but instead pass each field of the inline type
1877 if (!arg->is_InlineType()) {
1878 assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
1879 arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
1880 }
1881 InlineTypeNode* vt = arg->as_InlineType();
1882 vt->pass_fields(this, call, idx, true, !t->maybe_null());
1883 // If an inline type argument is passed as fields, attach the Method* to the call site
1884 // to be able to access the extended signature later via attached_method_before_pc().
1885 // For example, see CompiledMethod::preserve_callee_argument_oops().
1886 call->set_override_symbolic_info(true);
1887 // Register an evol dependency on the callee method to make sure that this method is deoptimized and
1888 // re-compiled with a non-scalarized calling convention if the callee method is later marked as mismatched.
1889 C->dependencies()->assert_evol_method(call->method());
1890 arg_num++;
1891 continue;
1892 } else if (arg->is_InlineType()) {
1893 // Pass inline type argument via oop to callee
1894 arg = arg->as_InlineType()->buffer(this);
1895 if (!is_late_inline && !arg->as_InlineType()->is_larval()) {
1896 arg = arg->as_InlineType()->get_oop();
1897 }
1898 }
1899 if (t != Type::HALF) {
1900 arg_num++;
1901 }
1902 call->init_req(idx++, arg);
1903 }
1904 }
1905
1906 //---------------------------set_edges_for_java_call---------------------------
1907 // Connect a newly created call into the current JVMS.
1908 // A return value node (if any) is returned from set_edges_for_java_call.
1909 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1910
1911 // Add the predefined inputs:
1912 call->init_req( TypeFunc::Control, control() );
1913 call->init_req( TypeFunc::I_O , i_o() );
1914 call->init_req( TypeFunc::Memory , reset_memory() );
1915 call->init_req( TypeFunc::FramePtr, frameptr() );
1916 call->init_req( TypeFunc::ReturnAdr, top() );
1917
1918 add_safepoint_edges(call, must_throw);
1919
1920 Node* xcall = _gvn.transform(call);
1921
1922 if (xcall == top()) {
1923 set_control(top());
1924 return;
1925 }
1926 assert(xcall == call, "call identity is stable");
1927
1928 // Re-use the current map to produce the result.
1929
1930 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1931 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1932 set_all_memory_call(xcall, separate_io_proj);
1933
1934 //return xcall; // no need, caller already has it
1935 }
1936
1937 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1938 if (stopped()) return top(); // maybe the call folded up?
1939
1940 // Note: Since any out-of-line call can produce an exception,
1941 // we always insert an I_O projection from the call into the result.
1942
1943 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1944
1945 if (separate_io_proj) {
1946 // The caller requested separate projections be used by the fall
1947 // through and exceptional paths, so replace the projections for
1948 // the fall through path.
1949 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1950 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1951 }
1952
1953 // Capture the return value, if any.
1954 Node* ret;
1955 if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) {
1956 ret = top();
1957 } else if (call->tf()->returns_inline_type_as_fields()) {
1958 // Return of multiple values (inline type fields): we create an
1959 // InlineType node; each field is a projection from the call.
1960 ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
1961 uint base_input = TypeFunc::Parms;
1962 ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, false);
1963 } else {
1964 ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1965 ciType* t = call->method()->return_type();
1966 if (t->is_klass()) {
1967 const Type* type = TypeOopPtr::make_from_klass(t->as_klass());
1968 if (type->is_inlinetypeptr()) {
1969 ret = InlineTypeNode::make_from_oop(this, ret, type->inline_klass(), type->inline_klass()->is_null_free());
1970 }
1971 }
1972 }
1973
1974 // We just called the constructor on a value type receiver. Reload it from the buffer.
1975 if (call->method()->is_object_constructor() && call->method()->holder()->is_inlinetype()) {
1976 InlineTypeNode* receiver = call->in(TypeFunc::Parms)->as_InlineType();
1977 assert(receiver->is_larval(), "must be larval");
1978 assert(receiver->is_allocated(&gvn()), "larval must be buffered");
1979 InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, receiver->get_oop(), receiver->bottom_type()->inline_klass(), true);
1980 assert(!reloaded->is_larval(), "should not be larval anymore");
1981 replace_in_map(receiver, reloaded);
1982 }
1983
1984 return ret;
1985 }
1986
1987 //--------------------set_predefined_input_for_runtime_call--------------------
1988 // Reading and setting the memory state is way conservative here.
1989 // The real problem is that I am not doing real Type analysis on memory,
1990 // so I cannot distinguish card mark stores from other stores. Across a GC
1991 // point the Store Barrier and the card mark memory have to agree. I cannot
1992 // have a card mark store and its barrier split across the GC point from
1993 // either above or below. Here I get that to happen by reading ALL of memory.
1994 // A better answer would be to separate out card marks from other memory.
1995 // For now, return the input memory state, so that it can be reused
1996 // after the call, if this call has restricted memory effects.
1997 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1998 // Set fixed predefined input arguments
1999 Node* memory = reset_memory();
2000 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
2001 call->init_req( TypeFunc::Control, control() );
2002 call->init_req( TypeFunc::I_O, top() ); // does no i/o
2003 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
2054 if (use->is_MergeMem()) {
2055 wl.push(use);
2056 }
2057 }
2058 }
2059
2060 // Replace the call with the current state of the kit.
2061 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
2062 JVMState* ejvms = nullptr;
2063 if (has_exceptions()) {
2064 ejvms = transfer_exceptions_into_jvms();
2065 }
2066
2067 ReplacedNodes replaced_nodes = map()->replaced_nodes();
2068 ReplacedNodes replaced_nodes_exception;
2069 Node* ex_ctl = top();
2070
2071 SafePointNode* final_state = stop();
2072
2073 // Find all the needed outputs of this call
2074 CallProjections* callprojs = call->extract_projections(true);
2075
2076 Unique_Node_List wl;
2077 Node* init_mem = call->in(TypeFunc::Memory);
2078 Node* final_mem = final_state->in(TypeFunc::Memory);
2079 Node* final_ctl = final_state->in(TypeFunc::Control);
2080 Node* final_io = final_state->in(TypeFunc::I_O);
2081
2082 // Replace all the old call edges with the edges from the inlining result
2083 if (callprojs->fallthrough_catchproj != nullptr) {
2084 C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2085 }
2086 if (callprojs->fallthrough_memproj != nullptr) {
2087 if (final_mem->is_MergeMem()) {
2089 // The parser's exit MergeMem was not transformed but may be optimized
2089 final_mem = _gvn.transform(final_mem);
2090 }
2091 C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
2092 add_mergemem_users_to_worklist(wl, final_mem);
2093 }
2094 if (callprojs->fallthrough_ioproj != nullptr) {
2095 C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
2096 }
2097
2098 // Replace the result with the new result if it exists and is used
2099 if (callprojs->resproj[0] != nullptr && result != nullptr) {
2100 // If the inlined code is dead, the result projections for an inline type returned as
2101 // fields have not been replaced. They will go away once the call is replaced by TOP below.
2102 assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
2103 "unexpected number of results");
2104 C->gvn_replace_by(callprojs->resproj[0], result);
2105 }
2106
2107 if (ejvms == nullptr) {
2108 // No exception edges, so simply kill off those paths
2109 if (callprojs->catchall_catchproj != nullptr) {
2110 C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2111 }
2112 if (callprojs->catchall_memproj != nullptr) {
2113 C->gvn_replace_by(callprojs->catchall_memproj, C->top());
2114 }
2115 if (callprojs->catchall_ioproj != nullptr) {
2116 C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
2117 }
2118 // Replace the old exception object with top
2119 if (callprojs->exobj != nullptr) {
2120 C->gvn_replace_by(callprojs->exobj, C->top());
2121 }
2122 } else {
2123 GraphKit ekit(ejvms);
2124
2125 // Load my combined exception state into the kit, with all phis transformed:
2126 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2127 replaced_nodes_exception = ex_map->replaced_nodes();
2128
2129 Node* ex_oop = ekit.use_exception_state(ex_map);
2130
2131 if (callprojs->catchall_catchproj != nullptr) {
2132 C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2133 ex_ctl = ekit.control();
2134 }
2135 if (callprojs->catchall_memproj != nullptr) {
2136 Node* ex_mem = ekit.reset_memory();
2137 C->gvn_replace_by(callprojs->catchall_memproj, ex_mem);
2138 add_mergemem_users_to_worklist(wl, ex_mem);
2139 }
2140 if (callprojs->catchall_ioproj != nullptr) {
2141 C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
2142 }
2143
2144 // Replace the old exception object with the newly created one
2145 if (callprojs->exobj != nullptr) {
2146 C->gvn_replace_by(callprojs->exobj, ex_oop);
2147 }
2148 }
2149
2150 // Disconnect the call from the graph
2151 call->disconnect_inputs(C);
2152 C->gvn_replace_by(call, C->top());
2153
2154 // Clean up any MergeMems that feed other MergeMems since the
2155 // optimizer doesn't like that.
2156 while (wl.size() > 0) {
2157 _gvn.transform(wl.pop());
2158 }
2159
2160 if (callprojs->fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2161 replaced_nodes.apply(C, final_ctl);
2162 }
2163 if (!ex_ctl->is_top() && do_replaced_nodes) {
2164 replaced_nodes_exception.apply(C, ex_ctl);
2165 }
2166 }
2167
2168
2169 //------------------------------increment_counter------------------------------
2170 // for statistics: increment a VM counter by 1
2171
2172 void GraphKit::increment_counter(address counter_addr) {
2173 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2174 increment_counter(adr1);
2175 }
2176
2177 void GraphKit::increment_counter(Node* counter_addr) {
2178 int adr_type = Compile::AliasIdxRaw;
2179 Node* ctrl = control();
2180 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
2339 *
2340 * @param n node that the type applies to
2341 * @param exact_kls type from profiling
2342  * @param ptr_kind  did profiling see null?
2343 *
2344 * @return node with improved type
2345 */
2346 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2347 const Type* current_type = _gvn.type(n);
2348 assert(UseTypeSpeculation, "type speculation must be on");
2349
2350 const TypePtr* speculative = current_type->speculative();
2351
2352 // Should the klass from the profile be recorded in the speculative type?
2353 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2354 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2355 const TypeOopPtr* xtype = tklass->as_instance_type();
2356 assert(xtype->klass_is_exact(), "Should be exact");
2357 // Any reason to believe n is not null (from this profiling or a previous one)?
2358 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2359 const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2360 // record the new speculative type's depth
2361 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2362 speculative = speculative->with_inline_depth(jvms()->depth());
2363 } else if (current_type->would_improve_ptr(ptr_kind)) {
2364     // Profiling reports that the pointer is always or never null, so we
2365     // can narrow the speculative type accordingly.
2366 if (ptr_kind == ProfileAlwaysNull) {
2367 speculative = TypePtr::NULL_PTR;
2368 } else {
2369 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2370 const TypePtr* ptr = TypePtr::NOTNULL;
2371 if (speculative != nullptr) {
2372 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2373 } else {
2374 speculative = ptr;
2375 }
2376 }
2377 }
2378
2379 if (speculative != current_type->speculative()) {
2380 // Build a type with a speculative type (what we think we know
2381 // about the type but will need a guard when we use it)
2382 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
2383 // We're changing the type, we need a new CheckCast node to carry
2384 // the new type. The new type depends on the control: what
2385 // profiling tells us is only valid from here as far as we can
2386 // tell.
2387 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2388 cast = _gvn.transform(cast);
2389 replace_in_map(n, cast);
2390 n = cast;
2391 }
2392
2393 return n;
2394 }
2395
2396 /**
2397 * Record profiling data from receiver profiling at an invoke with the
2398 * type system so that it can propagate it (speculation)
2399 *
2400 * @param n receiver node
2401 *
2402 * @return node with improved type
2403 */
2404 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2405 if (!UseTypeSpeculation) {
2406 return n;
2407 }
2408 ciKlass* exact_kls = profile_has_unique_klass();
2409 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2410 if ((java_bc() == Bytecodes::_checkcast ||
2411 java_bc() == Bytecodes::_instanceof ||
2412 java_bc() == Bytecodes::_aastore) &&
2413 method()->method_data()->is_mature()) {
2414 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2415 if (data != nullptr) {
2416 if (java_bc() == Bytecodes::_aastore) {
2417 ciKlass* array_type = nullptr;
2418 ciKlass* element_type = nullptr;
2419 ProfilePtrKind element_ptr = ProfileMaybeNull;
2420 bool flat_array = true;
2421 bool null_free_array = true;
2422 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2423 exact_kls = element_type;
2424 ptr_kind = element_ptr;
2425 } else {
2426 if (!data->as_BitData()->null_seen()) {
2427 ptr_kind = ProfileNeverNull;
2428 } else {
2429 assert(data->is_ReceiverTypeData(), "bad profile data type");
2430 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2431 uint i = 0;
2432 for (; i < call->row_limit(); i++) {
2433 ciKlass* receiver = call->receiver(i);
2434 if (receiver != nullptr) {
2435 break;
2436 }
2437 }
2438 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2439 }
2440 }
2441 }
2442 }
2443 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2444 }
2445
2446 /**
2447 * Record profiling data from argument profiling at an invoke with the
2448 * type system so that it can propagate it (speculation)
2449 *
2450 * @param dest_method target method for the call
2451 * @param bc what invoke bytecode is this?
2452 */
2453 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2454 if (!UseTypeSpeculation) {
2455 return;
2456 }
2457 const TypeFunc* tf = TypeFunc::make(dest_method);
2458 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2459 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2460 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2461 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2462 if (is_reference_type(targ->basic_type())) {
2463 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2464 ciKlass* better_type = nullptr;
2465 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2466 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2467 }
2468 i++;
2469 }
2470 }
2471 }
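
// Example of the indexing in the loop above (illustrative): for a virtual
// call to m(int, Object, Object) the bytecode has a receiver, so skip == 1
// and j walks the explicit parameters, while i counts only the
// reference-typed arguments that are actually profiled (here the two
// Object parameters), bounded by TypeProfileArgsLimit.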
2472
2473 /**
2474 * Record profiling data from parameter profiling at an invoke with
2475 * the type system so that it can propagate it (speculation)
2476 */
2477 void GraphKit::record_profiled_parameters_for_speculation() {
2478 if (!UseTypeSpeculation) {
2479 return;
2480 }
2481 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2495 * the type system so that it can propagate it (speculation)
2496 */
2497 void GraphKit::record_profiled_return_for_speculation() {
2498 if (!UseTypeSpeculation) {
2499 return;
2500 }
2501 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2502 ciKlass* better_type = nullptr;
2503 if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2504 // If profiling reports a single type for the return value,
2505 // feed it to the type system so it can propagate it as a
2506 // speculative type
2507 record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2508 }
2509 }
2510
2511 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2512 if (Matcher::strict_fp_requires_explicit_rounding) {
2513 // (Note: TypeFunc::make has a cache that makes this fast.)
2514 const TypeFunc* tf = TypeFunc::make(dest_method);
2515 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2516 for (int j = 0; j < nargs; j++) {
2517 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2518 if (targ->basic_type() == T_DOUBLE) {
2519         // If any parameters are doubles, they must be rounded before
2520         // the call; dprecision_rounding does the gvn.transform.
2521 Node *arg = argument(j);
2522 arg = dprecision_rounding(arg);
2523 set_argument(j, arg);
2524 }
2525 }
2526 }
2527 }
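
// Illustrative sketch (assuming x87 floating point, i.e. IA32 with UseSSE < 2):
// without the explicit rounding above, a double argument can reach the callee
// carrying extra x87 80-bit precision, e.g.
//   double d = a * b;   // intermediate kept in an 80-bit ST register
//   callee(d);          // callee expects strict 64-bit double semantics
// dprecision_rounding() forces a store/reload so the value is truncated to
// 64 bits before the call.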
2528
2529 // rounding for strict float precision conformance
2530 Node* GraphKit::precision_rounding(Node* n) {
2531 if (Matcher::strict_fp_requires_explicit_rounding) {
2532 #ifdef IA32
2533 if (UseSSE == 0) {
2534 return _gvn.transform(new RoundFloatNode(0, n));
2535 }
2536 #else
2537 Unimplemented();
2646 // The first null ends the list.
2647 Node* parm0, Node* parm1,
2648 Node* parm2, Node* parm3,
2649 Node* parm4, Node* parm5,
2650 Node* parm6, Node* parm7) {
2651 assert(call_addr != nullptr, "must not call null targets");
2652
2653 // Slow-path call
2654 bool is_leaf = !(flags & RC_NO_LEAF);
2655 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2656 if (call_name == nullptr) {
2657 assert(!is_leaf, "must supply name for leaf");
2658 call_name = OptoRuntime::stub_name(call_addr);
2659 }
2660 CallNode* call;
2661 if (!is_leaf) {
2662 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2663 } else if (flags & RC_NO_FP) {
2664 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2665   } else if (flags & RC_VECTOR) {
2666 uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2667 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2668 } else {
2669 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2670 }
2671
2672 // The following is similar to set_edges_for_java_call,
2673 // except that the memory effects of the call are restricted to AliasIdxRaw.
2674
2675 // Slow path call has no side-effects, uses few values
2676 bool wide_in = !(flags & RC_NARROW_MEM);
2677 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2678
2679 Node* prev_mem = nullptr;
2680 if (wide_in) {
2681 prev_mem = set_predefined_input_for_runtime_call(call);
2682 } else {
2683 assert(!wide_out, "narrow in => narrow out");
2684 Node* narrow_mem = memory(adr_type);
2685 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2686 }
2726
2727 if (has_io) {
2728 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2729 }
2730 return call;
2731
2732 }
2733
2734 // i2b
2735 Node* GraphKit::sign_extend_byte(Node* in) {
2736 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2737 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2738 }
2739
2740 // i2s
2741 Node* GraphKit::sign_extend_short(Node* in) {
2742 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2743 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2744 }
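
// Worked example of the shift idiom above (i2b): for in == 0x000001FF,
//   tmp = 0x000001FF << 24 == 0xFF000000
//   res = 0xFF000000 >> 24 == 0xFFFFFFFF (arithmetic shift) == -1
// i.e. only the low 8 bits (16 for i2s) survive, sign-extended.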
2745
2746
2747 //------------------------------merge_memory-----------------------------------
2748 // Merge memory from one path into the current memory state.
2749 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2750 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2751 Node* old_slice = mms.force_memory();
2752 Node* new_slice = mms.memory2();
2753 if (old_slice != new_slice) {
2754 PhiNode* phi;
2755 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2756 if (mms.is_empty()) {
2757 // clone base memory Phi's inputs for this memory slice
2758 assert(old_slice == mms.base_memory(), "sanity");
2759 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2760 _gvn.set_type(phi, Type::MEMORY);
2761 for (uint i = 1; i < phi->req(); i++) {
2762 phi->init_req(i, old_slice->in(i));
2763 }
2764 } else {
2765 phi = old_slice->as_Phi(); // Phi was generated already
2766 }
3023
3024 // Now do a linear scan of the secondary super-klass array. Again, no real
3025 // performance impact (too rare) but it's gotta be done.
3026 // Since the code is rarely used, there is no penalty for moving it
3027 // out of line, and it can only improve I-cache density.
3028 // The decision to inline or out-of-line this final check is platform
3029 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
3030 Node* psc = gvn.transform(
3031 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
3032
3033 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
3034 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
3035 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
3036
3037 // Return false path; set default control to true path.
3038 *ctrl = gvn.transform(r_ok_subtype);
3039 return gvn.transform(r_not_subtype);
3040 }
3041
3042 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
3043 const Type* sub_t = _gvn.type(obj_or_subklass);
3044 if (sub_t->make_oopptr() != nullptr && sub_t->make_oopptr()->is_inlinetypeptr()) {
3045 sub_t = TypeKlassPtr::make(sub_t->inline_klass());
3046 obj_or_subklass = makecon(sub_t);
3047 }
3048 bool expand_subtype_check = C->post_loop_opts_phase() || // macro node expansion is over
3049 ExpandSubTypeCheckAtParseTime; // forced expansion
3050 if (expand_subtype_check) {
3051 MergeMemNode* mem = merged_memory();
3052 Node* ctrl = control();
3053 Node* subklass = obj_or_subklass;
3054 if (!sub_t->isa_klassptr()) {
3055 subklass = load_object_klass(obj_or_subklass);
3056 }
3057
3058 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
3059 set_control(ctrl);
3060 return n;
3061 }
3062
3063 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
3064 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
3065 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3066 set_control(_gvn.transform(new IfTrueNode(iff)));
3067 return _gvn.transform(new IfFalseNode(iff));
3068 }
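
// Sketch of the graph emitted by the non-expanded path above:
//
//   SubTypeCheck(obj_or_subklass, superklass)
//         |
//       Bool(eq)
//         |
//        If ---IfTrue---> control()     (is a subtype: fall through)
//          \--IfFalse-->  returned node (not a subtype)
//
// The SubTypeCheck macro node is expanded after loop opts unless
// ExpandSubTypeCheckAtParseTime forces expansion at parse time.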
3069
3070 // Profile-driven exact type check:
3071 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
3072 float prob, Node* *casted_receiver) {
3073 assert(!klass->is_interface(), "no exact type check on interfaces");
3074 Node* fail = top();
3075 const Type* rec_t = _gvn.type(receiver);
3076 if (rec_t->is_inlinetypeptr()) {
3077 if (klass->equals(rec_t->inline_klass())) {
3078 (*casted_receiver) = receiver; // Always passes
3079 } else {
3080 (*casted_receiver) = top(); // Always fails
3081 fail = control();
3082 set_control(top());
3083 }
3084 return fail;
3085 }
3086 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
3087 Node* recv_klass = load_object_klass(receiver);
3088 fail = type_check(recv_klass, tklass, prob);
3089
3090 if (!stopped()) {
3091 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3092 const TypeOopPtr* recv_xtype = tklass->as_instance_type();
3093 assert(recv_xtype->klass_is_exact(), "");
3094
3095 if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
3096 // Subsume downstream occurrences of receiver with a cast to
3097 // recv_xtype, since now we know what the type will be.
3098 Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
3099 Node* res = _gvn.transform(cast);
3100 if (recv_xtype->is_inlinetypeptr()) {
3101 assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
3102 res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass());
3103 }
3104 (*casted_receiver) = res;
3105 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
3106 // (User must make the replace_in_map call.)
3107 }
3108 }
3109
3110 return fail;
3111 }
3112
3113 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
3114 float prob) {
3115 Node* want_klass = makecon(tklass);
3116 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
3117 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3118 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
3119 set_control(_gvn.transform(new IfTrueNode (iff)));
3120 Node* fail = _gvn.transform(new IfFalseNode(iff));
3121 return fail;
3122 }
3123
3124 //------------------------------subtype_check_receiver-------------------------
3125 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3126 Node** casted_receiver) {
3127 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
3128 Node* want_klass = makecon(tklass);
3129
3130 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3131
3132 // Ignore interface type information until interface types are properly tracked.
3133 if (!stopped() && !klass->is_interface()) {
3134 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3135 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3136 if (receiver_type != nullptr && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3137 Node* cast = _gvn.transform(new CheckCastPPNode(control(), receiver, recv_type));
3138 if (recv_type->is_inlinetypeptr()) {
3139 cast = InlineTypeNode::make_from_oop(this, cast, recv_type->inline_klass());
3140 }
3141 (*casted_receiver) = cast;
3142 }
3143 }
3144
3145 return slow_ctl;
3146 }
3147
3148 //------------------------------seems_never_null-------------------------------
3149 // Use null_seen information if it is available from the profile.
3150 // If we see an unexpected null at a type check we record it and force a
3151 // recompile; the offending check will be recompiled to handle nulls.
3152 // If we see several offending BCIs, then all checks in the
3153 // method will be recompiled.
3154 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3155 speculating = !_gvn.type(obj)->speculative_maybe_null();
3156 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3157 if (UncommonNullCast // Cutout for this technique
3158 && obj != null() // And not the -Xcomp stupid case?
3159 && !too_many_traps(reason)
3160 ) {
3161 if (speculating) {
3230
3231 //------------------------maybe_cast_profiled_receiver-------------------------
3232 // If the profile has seen exactly one type, narrow to exactly that type.
3233 // Subsequent type checks will always fold up.
3234 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3235 const TypeKlassPtr* require_klass,
3236 ciKlass* spec_klass,
3237 bool safe_for_replace) {
3238 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3239
3240 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3241
3242 // Make sure we haven't already deoptimized from this tactic.
3243 if (too_many_traps_or_recompiles(reason))
3244 return nullptr;
3245
3246 // (No, this isn't a call, but it's enough like a virtual call
3247 // to use the same ciMethod accessor to get the profile info...)
3248 // If we have a speculative type use it instead of profiling (which
3249 // may not help us)
3250 ciKlass* exact_kls = spec_klass;
3251 if (exact_kls == nullptr) {
3252 if (java_bc() == Bytecodes::_aastore) {
3253 ciKlass* array_type = nullptr;
3254 ciKlass* element_type = nullptr;
3255 ProfilePtrKind element_ptr = ProfileMaybeNull;
3256 bool flat_array = true;
3257 bool null_free_array = true;
3258 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3259 exact_kls = element_type;
3260 } else {
3261 exact_kls = profile_has_unique_klass();
3262 }
3263 }
3264   if (exact_kls != nullptr) { // no cast failures here
3265 if (require_klass == nullptr ||
3266 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3267 // If we narrow the type to match what the type profile sees or
3268 // the speculative type, we can then remove the rest of the
3269 // cast.
3270 // This is a win, even if the exact_kls is very specific,
3271 // because downstream operations, such as method calls,
3272 // will often benefit from the sharper type.
3273 Node* exact_obj = not_null_obj; // will get updated in place...
3274 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3275 &exact_obj);
3276 { PreserveJVMState pjvms(this);
3277 set_control(slow_ctl);
3278 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3279 }
3280 if (safe_for_replace) {
3281 replace_in_map(not_null_obj, exact_obj);
3282 }
3283 return exact_obj;
3373 // If not_null_obj is dead, only null-path is taken
3374 if (stopped()) { // Doing instance-of on a null?
3375 set_control(null_ctl);
3376 return intcon(0);
3377 }
3378 region->init_req(_null_path, null_ctl);
3379 phi ->init_req(_null_path, intcon(0)); // Set null path value
3380 if (null_ctl == top()) {
3381 // Do this eagerly, so that pattern matches like is_diamond_phi
3382 // will work even during parsing.
3383 assert(_null_path == PATH_LIMIT-1, "delete last");
3384 region->del_req(_null_path);
3385 phi ->del_req(_null_path);
3386 }
3387
3388   // Do we know that the type check always succeeds?
3389 bool known_statically = false;
3390 if (_gvn.type(superklass)->singleton()) {
3391 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3392 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3393 if (subk != nullptr && subk->is_loaded()) {
3394 int static_res = C->static_subtype_check(superk, subk);
3395 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3396 }
3397 }
3398
3399 if (!known_statically) {
3400 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3401 // We may not have profiling here or it may not help us. If we
3402 // have a speculative type use it to perform an exact cast.
3403 ciKlass* spec_obj_type = obj_type->speculative_type();
3404 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3405 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3406 if (stopped()) { // Profile disagrees with this path.
3407 set_control(null_ctl); // Null is the only remaining possibility.
3408 return intcon(0);
3409 }
3410 if (cast_obj != nullptr) {
3411 not_null_obj = cast_obj;
3412 }
3413 }
3429 record_for_igvn(region);
3430
3431 // If we know the type check always succeeds then we don't use the
3432 // profiling data at this bytecode. Don't lose it, feed it to the
3433 // type system as a speculative type.
3434 if (safe_for_replace) {
3435 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3436 replace_in_map(obj, casted_obj);
3437 }
3438
3439 return _gvn.transform(phi);
3440 }
3441
3442 //-------------------------------gen_checkcast---------------------------------
3443 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3444 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3445 // uncommon-trap paths work. Adjust stack after this call.
3446 // If failure_control is supplied and not null, it is filled in with
3447 // the control edge for the cast failure. Otherwise, an appropriate
3448 // uncommon trap or exception is thrown.
3449 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {
3450 kill_dead_locals(); // Benefit all the uncommon traps
3451 const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr()->try_improve();
3452 const TypeOopPtr *toop = tk->cast_to_exactness(false)->as_instance_type();
3453 bool safe_for_replace = (failure_control == nullptr);
3454 assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer");
3455
3456 // Fast cutout: Check the case that the cast is vacuously true.
3457 // This detects the common cases where the test will short-circuit
3458 // away completely. We do this before we perform the null check,
3459 // because if the test is going to turn into zero code, we don't
3460 // want a residual null check left around. (Causes a slowdown,
3461 // for example, in some objArray manipulations, such as a[i]=a[j].)
3462 if (tk->singleton()) {
3463 const TypeKlassPtr* kptr = nullptr;
3464 const Type* t = _gvn.type(obj);
3465 if (t->isa_oop_ptr()) {
3466 kptr = t->is_oopptr()->as_klass_type();
3467 } else if (obj->is_InlineType()) {
3468 ciInlineKlass* vk = t->inline_klass();
3469 kptr = TypeInstKlassPtr::make(TypePtr::NotNull, vk, Type::Offset(0));
3470 }
3471 if (kptr != nullptr) {
3472 switch (C->static_subtype_check(tk, kptr)) {
3473 case Compile::SSC_always_true:
3474         // If we know the type check always succeeds then we don't use
3475 // the profiling data at this bytecode. Don't lose it, feed it
3476 // to the type system as a speculative type.
3477 obj = record_profiled_receiver_for_speculation(obj);
3478 if (null_free) {
3479 assert(safe_for_replace, "must be");
3480 obj = null_check(obj);
3481 }
3482 assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineType(), "should have been scalarized");
3483 return obj;
3484 case Compile::SSC_always_false:
3485 if (null_free) {
3486 assert(safe_for_replace, "must be");
3487 obj = null_check(obj);
3488 }
3489 // It needs a null check because a null will *pass* the cast check.
3490 if (t->isa_oopptr() != nullptr && !t->is_oopptr()->maybe_null()) {
3491 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3492 Deoptimization::DeoptReason reason = is_aastore ?
3493 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3494 builtin_throw(reason);
3495 return top();
3496 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3497 return null_assert(obj);
3498 }
3499 break; // Fall through to full check
3500 default:
3501 break;
3502 }
3503 }
3504 }
3505
3506 ciProfileData* data = nullptr;
3507 if (failure_control == nullptr) { // use MDO in regular case only
3508 assert(java_bc() == Bytecodes::_aastore ||
3509 java_bc() == Bytecodes::_checkcast,
3510 "interpreter profiles type checks only for these BCs");
3511 if (method()->method_data()->is_mature()) {
3512 data = method()->method_data()->bci_to_data(bci());
3513 }
3514 }
3515
3516 // Make the merge point
3517 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3518 RegionNode* region = new RegionNode(PATH_LIMIT);
3519 Node* phi = new PhiNode(region, toop);
3520 _gvn.set_type(region, Type::CONTROL);
3521 _gvn.set_type(phi, toop);
3522
3523 C->set_has_split_ifs(true); // Has chance for split-if optimization
3524
3525 // Use null-cast information if it is available
3526 bool speculative_not_null = false;
3527 bool never_see_null = ((failure_control == nullptr) // regular case only
3528 && seems_never_null(obj, data, speculative_not_null));
3529
3530 if (obj->is_InlineType()) {
3531     // Re-execute if buffering triggers deoptimization
3532 PreserveReexecuteState preexecs(this);
3533 jvms()->set_should_reexecute(true);
3534 obj = obj->as_InlineType()->buffer(this, safe_for_replace);
3535 }
3536
3537 // Null check; get casted pointer; set region slot 3
3538 Node* null_ctl = top();
3539 Node* not_null_obj = nullptr;
3540 if (null_free) {
3541 assert(safe_for_replace, "must be");
3542 not_null_obj = null_check(obj);
3543 } else {
3544 not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3545 }
3546
3547 // If not_null_obj is dead, only null-path is taken
3548 if (stopped()) { // Doing instance-of on a null?
3549 set_control(null_ctl);
3550 if (toop->is_inlinetypeptr()) {
3551 return InlineTypeNode::make_null(_gvn, toop->inline_klass());
3552 }
3553 return null();
3554 }
3555 region->init_req(_null_path, null_ctl);
3556 phi ->init_req(_null_path, null()); // Set null path value
3557 if (null_ctl == top()) {
3558 // Do this eagerly, so that pattern matches like is_diamond_phi
3559 // will work even during parsing.
3560 assert(_null_path == PATH_LIMIT-1, "delete last");
3561 region->del_req(_null_path);
3562 phi ->del_req(_null_path);
3563 }
3564
3565 Node* cast_obj = nullptr;
3566 if (tk->klass_is_exact()) {
3567 // The following optimization tries to statically cast the speculative type of the object
3568 // (for example obtained during profiling) to the type of the superklass and then do a
3569 // dynamic check that the type of the object is what we expect. To work correctly
3570 // for checkcast and aastore the type of superklass should be exact.
3571 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3572 // We may not have profiling here or it may not help us. If we have
3573 // a speculative type use it to perform an exact cast.
3574 ciKlass* spec_obj_type = obj_type->speculative_type();
3575 if (spec_obj_type != nullptr || data != nullptr) {
3576 cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk, spec_obj_type, safe_for_replace);
3577 if (cast_obj != nullptr) {
3578 if (failure_control != nullptr) // failure is now impossible
3579 (*failure_control) = top();
3580 // adjust the type of the phi to the exact klass:
3581 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3582 }
3583 }
3584 }
3585
3586 if (cast_obj == nullptr) {
3587 // Generate the subtype check
3588 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3589
3590 // Plug in success path into the merge
3591 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3592 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3593 if (failure_control == nullptr) {
3594 if (not_subtype_ctrl != top()) { // If failure is possible
3595 PreserveJVMState pjvms(this);
3596 set_control(not_subtype_ctrl);
3597 Node* obj_klass = nullptr;
3598 if (not_null_obj->is_InlineType()) {
3599 obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3600 } else {
3601 obj_klass = load_object_klass(not_null_obj);
3602 }
3603 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3604 Deoptimization::DeoptReason reason = is_aastore ?
3605 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3606 builtin_throw(reason);
3607 }
3608 } else {
3609 (*failure_control) = not_subtype_ctrl;
3610 }
3611 }
3612
3613 region->init_req(_obj_path, control());
3614 phi ->init_req(_obj_path, cast_obj);
3615
3616 // A merge of null or Casted-NotNull obj
3617 Node* res = _gvn.transform(phi);
3618
3619 // Note I do NOT always 'replace_in_map(obj,result)' here.
3620 // if( tk->klass()->can_be_primary_super() )
3621 // This means that if I successfully store an Object into an array-of-String
3622 // I 'forget' that the Object is really now known to be a String. I have to
3623 // do this because we don't have true union types for interfaces - if I store
3624 // a Baz into an array-of-Interface and then tell the optimizer it's an
3625 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3626 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3627 // replace_in_map( obj, res );
3628
3629 // Return final merged results
3630 set_control( _gvn.transform(region) );
3631 record_for_igvn(region);
3632
3633 bool not_inline = !toop->can_be_inline_type();
3634 bool not_flat_in_array = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flat_in_array());
3635 if (EnableValhalla && not_flat_in_array) {
3636 // Check if obj has been loaded from an array
3637 obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3638 Node* array = nullptr;
3639 if (obj->isa_Load()) {
3640 Node* address = obj->in(MemNode::Address);
3641 if (address->isa_AddP()) {
3642 array = address->as_AddP()->in(AddPNode::Base);
3643 }
3644 } else if (obj->is_Phi()) {
3645 Node* region = obj->in(0);
3646 // TODO make this more robust (see JDK-8231346)
3647 if (region->req() == 3 && region->in(2) != nullptr && region->in(2)->in(0) != nullptr) {
3648 IfNode* iff = region->in(2)->in(0)->isa_If();
3649 if (iff != nullptr) {
3650 iff->is_flat_array_check(&_gvn, &array);
3651 }
3652 }
3653 }
3654 if (array != nullptr) {
3655 const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3656 if (ary_t != nullptr && !ary_t->is_flat()) {
3657 if (!ary_t->is_not_null_free() && not_inline) {
3658 // Casting array element to a non-inline-type, mark array as not null-free.
3659 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3660 replace_in_map(array, cast);
3661 } else if (!ary_t->is_not_flat()) {
3662 // Casting array element to a non-flat type, mark array as not flat.
3663 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3664 replace_in_map(array, cast);
3665 }
3666 }
3667 }
3668 }
3669
3670 if (!stopped() && !res->is_InlineType()) {
3671 res = record_profiled_receiver_for_speculation(res);
3672 if (toop->is_inlinetypeptr()) {
3673 Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
3674 res = vt;
3675 if (safe_for_replace) {
3676 replace_in_map(obj, vt);
3677 replace_in_map(not_null_obj, vt);
3678 replace_in_map(res, vt);
3679 }
3680 }
3681 }
3682 return res;
3683 }
3684
3685 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
3686 Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3687 Node* mark = make_load(nullptr, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3688 Node* mask = MakeConX(markWord::inline_type_pattern);
3689 Node* masked = _gvn.transform(new AndXNode(mark, mask));
3690 Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
3691 return _gvn.transform(new BoolNode(cmp, is_inline ? BoolTest::eq : BoolTest::ne));
3692 }
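
// The test above reduces to (mark & inline_type_pattern) == inline_type_pattern.
// Illustrative example (bit values for illustration only; see markWord.hpp for
// the authoritative encoding): with a pattern of 0b11 in the low mark bits,
//   identity object: mark = ...00 -> masked == 0b00 != pattern -> not inline
//   inline object:   mark = ...11 -> masked == 0b11 == pattern -> inline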
3693
3694 Node* GraphKit::array_lh_test(Node* klass, jint mask, jint val, bool eq) {
3695 Node* lh_adr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
3696 // Make sure to use immutable memory here to enable hoisting the check out of loops
3697 Node* lh_val = _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), lh_adr, lh_adr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
3698 Node* masked = _gvn.transform(new AndINode(lh_val, intcon(mask)));
3699 Node* cmp = _gvn.transform(new CmpINode(masked, intcon(val)));
3700 return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
3701 }
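
// Example use of array_lh_test: null_free_array_test() below checks the
// _lh_null_free_array_bit_inplace bit of the layout helper against val == 0:
// null_free == false asks for "bit clear" (eq 0), while null_free == true
// asks for "bit set" (ne 0).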
3702
3703 // TODO 8325106 With JEP 401, flatness is not a property of the Class anymore.
3704 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
3705 // We can't use immutable memory here because the mark word is mutable.
3706 // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
3707 // check is moved out of loops (mainly to enable loop unswitching).
3708 Node* mem = UseArrayMarkWordCheck ? memory(Compile::AliasIdxRaw) : immutable_memory();
3709 Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, mem, array_or_klass));
3710 record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
3711 return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
3712 }
3713
3714 Node* GraphKit::null_free_array_test(Node* klass, bool null_free) {
3715 return array_lh_test(klass, Klass::_lh_null_free_array_bit_inplace, 0, !null_free);
3716 }
3717
3718 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3719 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3720 RegionNode* region = new RegionNode(3);
3721 Node* null_ctl = top();
3722 null_check_oop(val, &null_ctl);
3723 if (null_ctl != top()) {
3724 PreserveJVMState pjvms(this);
3725 set_control(null_ctl);
3726 {
3727 // Deoptimize if null-free array
3728 BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
3729 inc_sp(nargs);
3730 uncommon_trap(Deoptimization::Reason_null_check,
3731 Deoptimization::Action_none);
3732 }
3733 region->init_req(1, control());
3734 }
3735 region->init_req(2, control());
3736 set_control(_gvn.transform(region));
3737 record_for_igvn(region);
3738 if (_gvn.type(val) == TypePtr::NULL_PTR) {
3739     // Since we just successfully stored null, the array can't be null-free.
3740 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3741 ary_t = ary_t->cast_to_not_null_free();
3742 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3743 if (safe_for_replace) {
3744 replace_in_map(ary, cast);
3745 }
3746 ary = cast;
3747 }
3748 return ary;
3749 }
3750
3751 //------------------------------next_monitor-----------------------------------
3752 // What number should be given to the next monitor?
3753 int GraphKit::next_monitor() {
3754 int current = jvms()->monitor_depth()* C->sync_stack_slots();
3755 int next = current + C->sync_stack_slots();
3756 // Keep the toplevel high water mark current:
3757 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3758 return current;
3759 }
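
// Worked example (assuming sync_stack_slots() == 2, a typical value): the
// first monitorenter sees monitor_depth() == 0 and is assigned slot 0, a
// nested monitorenter gets slot 2, and fixed_slots() is raised to 2 and 4
// respectively so the frame reserves space for each BoxLock location.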
3760
3761 //------------------------------insert_mem_bar---------------------------------
3762 // Memory barrier to avoid floating things around
3763 // The membar serves as a pinch point for both control and all memory slices.
3764 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3765 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3766 mb->init_req(TypeFunc::Control, control());
3767 mb->init_req(TypeFunc::Memory, reset_memory());
3768 Node* membar = _gvn.transform(mb);
3796 }
3797 Node* membar = _gvn.transform(mb);
3798 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3799 if (alias_idx == Compile::AliasIdxBot) {
3800 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3801 } else {
3802 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3803 }
3804 return membar;
3805 }
3806
3807 //------------------------------shared_lock------------------------------------
3808 // Emit locking code.
3809 FastLockNode* GraphKit::shared_lock(Node* obj) {
3810 // bci is either a monitorenter bc or InvocationEntryBci
3811 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3812 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3813
3814   if (!GenerateSynchronizationCode)
3815 return nullptr; // Not locking things?
3816
3817 if (stopped()) // Dead monitor?
3818 return nullptr;
3819
3820 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3821
3822 // Box the stack location
3823 Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3824 Node* mem = reset_memory();
3825
3826   FastLockNode* flock = _gvn.transform(new FastLockNode(0, obj, box))->as_FastLock();
3827
3828 // Create the rtm counters for this fast lock if needed.
3829 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3830
3831 // Add monitor to debug info for the slow path. If we block inside the
3832 // slow path and de-opt, we need the monitor hanging around
3833 map()->push_monitor( flock );
3834
3835 const TypeFunc *tf = LockNode::lock_type();
3836 LockNode *lock = new LockNode(C, tf);
3865 }
3866 #endif
3867
3868 return flock;
3869 }
3870
3871
3872 //------------------------------shared_unlock----------------------------------
3873 // Emit unlocking code.
3874 void GraphKit::shared_unlock(Node* box, Node* obj) {
3875   // bci is either a monitorexit bc or InvocationEntryBci
3876 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3877 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3878
3879   if (!GenerateSynchronizationCode)
3880 return;
3881 if (stopped()) { // Dead monitor?
3882 map()->pop_monitor(); // Kill monitor from debug info
3883 return;
3884 }
3885 assert(!obj->is_InlineType(), "should not unlock on inline type");
3886
3887 // Memory barrier to avoid floating things down past the locked region
3888 insert_mem_bar(Op_MemBarReleaseLock);
3889
3890 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3891 UnlockNode *unlock = new UnlockNode(C, tf);
3892 #ifdef ASSERT
3893 unlock->set_dbg_jvms(sync_jvms());
3894 #endif
3895 uint raw_idx = Compile::AliasIdxRaw;
3896 unlock->init_req( TypeFunc::Control, control() );
3897 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3898 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3899 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3900 unlock->init_req( TypeFunc::ReturnAdr, top() );
3901
3902 unlock->init_req(TypeFunc::Parms + 0, obj);
3903 unlock->init_req(TypeFunc::Parms + 1, box);
3904 unlock = _gvn.transform(unlock)->as_Unlock();
3905
3906 Node* mem = reset_memory();
3907
3908 // unlock has no side-effects, sets few values
3909 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3910
3911 // Kill monitor from debug info
3912 map()->pop_monitor( );
3913 }
3914
3915 //-------------------------------get_layout_helper-----------------------------
3916 // If the given klass is a constant or known to be an array,
3917 // fetch the constant layout helper value into constant_value
3918 // and return null. Otherwise, load the non-constant
3919 // layout helper value, and return the node which represents it.
3920 // This two-faced routine is useful because allocation sites
3921 // almost always feature constant types.
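// For reference, the layout helper is roughly encoded as follows (see
// klass.hpp for the authoritative encoding): for instances it is the
// positive object size in bytes, with a low bit set when a slow
// allocation path is required; for arrays it is negative, with a tag,
// the header size, and log2(element size) packed into the remaining bits.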
3922 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3923 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3924 if (!StressReflectiveCode && klass_t != nullptr) {
3925 bool xklass = klass_t->klass_is_exact();
3926 bool can_be_flat = false;
3927 const TypeAryPtr* ary_type = klass_t->as_instance_type()->isa_aryptr();
3928 if (UseFlatArray && !xklass && ary_type != nullptr && !ary_type->is_null_free()) {
3929 // TODO 8325106 Fix comment
3930 // The runtime type of [LMyValue might be [QMyValue due to [QMyValue <: [LMyValue. Don't constant fold.
3931 const TypeOopPtr* elem = ary_type->elem()->make_oopptr();
3932 can_be_flat = ary_type->can_be_inline_array() && (!elem->is_inlinetypeptr() || elem->inline_klass()->flat_in_array());
3933 }
3934 if (!can_be_flat && (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM))) {
3935 jint lhelper;
3936 if (klass_t->is_flat()) {
3937 lhelper = ary_type->flat_layout_helper();
3938 } else if (klass_t->isa_aryklassptr()) {
3939 BasicType elem = ary_type->elem()->array_element_basic_type();
3940 if (is_reference_type(elem, true)) {
3941 elem = T_OBJECT;
3942 }
3943 lhelper = Klass::array_layout_helper(elem);
3944 } else {
3945 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
3946 }
3947 if (lhelper != Klass::_lh_neutral_value) {
3948 constant_value = lhelper;
3949 return (Node*) nullptr;
3950 }
3951 }
3952 }
3953 constant_value = Klass::_lh_neutral_value; // put in a known value
3954 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3955 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3956 }
3957
3958 // We just put in an allocate/initialize with a big raw-memory effect.
3959 // Hook selected additional alias categories on the initialization.
3960 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3961 MergeMemNode* init_in_merge,
3962 Node* init_out_raw) {
3963 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3964 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3965
3966 Node* prevmem = kit.memory(alias_idx);
3967 init_in_merge->set_memory_at(alias_idx, prevmem);
3968 if (init_out_raw != nullptr) {
3969 kit.set_memory(init_out_raw, alias_idx);
3970 }
3971 }
3972
3973 //---------------------------set_output_for_allocation-------------------------
3974 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3975 const TypeOopPtr* oop_type,
3976 bool deoptimize_on_exception) {
3977 int rawidx = Compile::AliasIdxRaw;
3978 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3979 add_safepoint_edges(alloc);
3980 Node* allocx = _gvn.transform(alloc);
3981 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3982 // create memory projection for i_o
3983 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3984 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3985
3986 // create a memory projection as for the normal control path
3987 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3988 set_memory(malloc, rawidx);
3989
3990 // a normal slow-call doesn't change i_o, but an allocation does
3991 // we create a separate i_o projection for the normal control path
3992 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3993 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3994
3995 // put in an initialization barrier
3996 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3997 rawoop)->as_Initialize();
3998 assert(alloc->initialization() == init, "2-way macro link must work");
3999 assert(init ->allocation() == alloc, "2-way macro link must work");
4000 {
4001 // Extract memory strands which may participate in the new object's
4002 // initialization, and source them from the new InitializeNode.
4003 // This will allow us to observe initializations when they occur,
4004 // and link them properly (as a group) to the InitializeNode.
4005 assert(init->in(InitializeNode::Memory) == malloc, "");
4006 MergeMemNode* minit_in = MergeMemNode::make(malloc);
4007 init->set_req(InitializeNode::Memory, minit_in);
4008 record_for_igvn(minit_in); // fold it up later, if possible
4009 _gvn.set_type(minit_in, Type::MEMORY);
4010 Node* minit_out = memory(rawidx);
4011 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
4012 // Add an edge in the MergeMem for the header fields so an access
4013 // to one of those has correct memory state
4014 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
4015 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
4016 if (oop_type->isa_aryptr()) {
4017 const TypeAryPtr* arytype = oop_type->is_aryptr();
4018 if (arytype->is_flat()) {
4019 // Initially all flat array accesses share a single slice
4020 // but that changes after parsing. Prepare the memory graph so
4021 // it can optimize flat array accesses properly once they
4022 // don't share a single slice.
4023 assert(C->flat_accesses_share_alias(), "should be set at parse time");
4024 C->set_flat_accesses_share_alias(false);
4025 ciInlineKlass* vk = arytype->elem()->inline_klass();
4026 for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
4027 ciField* field = vk->nonstatic_field_at(i);
4028 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4029 continue; // do not bother to track really large numbers of fields
4030 int off_in_vt = field->offset_in_bytes() - vk->first_field_offset();
4031 const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
4032 int fieldidx = C->get_alias_index(adr_type, true);
4033         // Pass nullptr for init_out. Having per-field memory edges for flat array elements as uses of the Initialize node
4034         // can result in per-field Phis being created for flat arrays, which confuses the logic of
4035 // Compile::adjust_flat_array_access_aliases().
4036 hook_memory_on_init(*this, fieldidx, minit_in, nullptr);
4037 }
4038 C->set_flat_accesses_share_alias(true);
4039 hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
4040 } else {
4041 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
4042 int elemidx = C->get_alias_index(telemref);
4043 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
4044 }
4045 } else if (oop_type->isa_instptr()) {
4046 set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
4047 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
4048 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
4049 ciField* field = ik->nonstatic_field_at(i);
4050 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4051 continue; // do not bother to track really large numbers of fields
4052 // Find (or create) the alias category for this field:
4053 int fieldidx = C->alias_type(field)->index();
4054 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
4055 }
4056 }
4057 }
4058
4059 // Cast raw oop to the real thing...
4060 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
4061 javaoop = _gvn.transform(javaoop);
4062 C->set_recent_alloc(control(), javaoop);
4063 assert(just_allocated_object(control()) == javaoop, "just allocated");
4064
4065 #ifdef ASSERT
4066 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
4077 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
4078 }
4079 }
4080 #endif //ASSERT
4081
4082 return javaoop;
4083 }
4084
4085 //---------------------------new_instance--------------------------------------
4086 // This routine takes a klass_node which may be constant (for a static type)
4087 // or may be non-constant (for reflective code). It will work equally well
4088 // for either, and the graph will fold nicely if the optimizer later reduces
4089 // the type to a constant.
4090 // The optional arguments are for specialized use by intrinsics:
4091 // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
4092 // - If 'return_size_val', report the total object size to the caller.
4093 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
4094 Node* GraphKit::new_instance(Node* klass_node,
4095 Node* extra_slow_test,
4096 Node* *return_size_val,
4097 bool deoptimize_on_exception,
4098 InlineTypeNode* inline_type_node) {
4099 // Compute size in doublewords
4100 // The size is always an integral number of doublewords, represented
4101 // as a positive bytewise size stored in the klass's layout_helper.
4102 // The layout_helper also encodes (in a low bit) the need for a slow path.
4103 jint layout_con = Klass::_lh_neutral_value;
4104 Node* layout_val = get_layout_helper(klass_node, layout_con);
4105 bool layout_is_con = (layout_val == nullptr);
4106
4107 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
4108 // Generate the initial go-slow test. It's either ALWAYS (return a
4109 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
4110 // case) a computed value derived from the layout_helper.
4111 Node* initial_slow_test = nullptr;
4112 if (layout_is_con) {
4113 assert(!StressReflectiveCode, "stress mode does not use these paths");
4114 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
4115 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
4116 } else { // reflective case
4117 // This reflective path is used by Unsafe.allocateInstance.
4118 // (It may be stress-tested by specifying StressReflectiveCode.)
4119     // Basically, we want to get into the VM if there's an illegal argument.
4120 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
4121 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
4122 if (extra_slow_test != intcon(0)) {
4123 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
4124 }
4125 // (Macro-expander will further convert this to a Bool, if necessary.)
4136
4137 // Clear the low bits to extract layout_helper_size_in_bytes:
4138 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
4139 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
4140 size = _gvn.transform( new AndXNode(size, mask) );
4141 }
4142 if (return_size_val != nullptr) {
4143 (*return_size_val) = size;
4144 }
4145
4146 // This is a precise notnull oop of the klass.
4147 // (Actually, it need not be precise if this is a reflective allocation.)
4148 // It's what we cast the result to.
4149 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
4150 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
4151 const TypeOopPtr* oop_type = tklass->as_instance_type();
4152
4153 // Now generate allocation code
4154
4155 // The entire memory state is needed for slow path of the allocation
4156 // since GC and deoptimization can happen.
4157 Node *mem = reset_memory();
4158 set_all_memory(mem); // Create new memory state
4159
4160 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
4161 control(), mem, i_o(),
4162 size, klass_node,
4163 initial_slow_test, inline_type_node);
4164
4165 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
4166 }
4167
4168 //-------------------------------new_array-------------------------------------
4169 // helper for newarray and anewarray
4170 // The 'length' parameter is (obviously) the length of the array.
4171 // The optional arguments are for specialized use by intrinsics:
4172 // - If 'return_size_val', report the non-padded array size (sum of header size
4173 // and array body) to the caller.
4174 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
4175 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
4176 Node* length, // number of array elements
4177 int nargs, // number of arguments to push back for uncommon trap
4178 Node* *return_size_val,
4179 bool deoptimize_on_exception) {
4180 jint layout_con = Klass::_lh_neutral_value;
4181 Node* layout_val = get_layout_helper(klass_node, layout_con);
4182 bool layout_is_con = (layout_val == nullptr);
4183
4184 if (!layout_is_con && !StressReflectiveCode &&
4185 !too_many_traps(Deoptimization::Reason_class_check)) {
4186 // This is a reflective array creation site.
4187 // Optimistically assume that it is a subtype of Object[],
4188 // so that we can fold up all the address arithmetic.
4189 layout_con = Klass::array_layout_helper(T_OBJECT);
4190 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
4191 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
4192 { BuildCutout unless(this, bol_lh, PROB_MAX);
4193 inc_sp(nargs);
4194 uncommon_trap(Deoptimization::Reason_class_check,
4195 Deoptimization::Action_maybe_recompile);
4196 }
4197 layout_val = nullptr;
4198 layout_is_con = true;
4199 }
4200
4201 // Generate the initial go-slow test. Make sure we do not overflow
4202 // if length is huge (near 2Gig) or negative! We do not need
4203 // exact double-words here, just a close approximation of needed
4204 // double-words. We can't add any offset or rounding bits, lest we
4205 // take a size -1 of bytes and make it positive. Use an unsigned
4206 // compare, so negative sizes look hugely positive.
4207 int fast_size_limit = FastAllocateSizeLimit;
4208 if (layout_is_con) {
4209 assert(!StressReflectiveCode, "stress mode does not use these paths");
4210 // Increase the size limit if we have exact knowledge of array type.
4211 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
4212 fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
4213 }
4214
4215 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
4216 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
4217
4218 // --- Size Computation ---
4219 // array_size = round_to_heap(array_header + (length << elem_shift));
4220 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
4221 // and align_to(x, y) == ((x + y-1) & ~(y-1))
4222 // The rounding mask is strength-reduced, if possible.
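// Worked example (64-bit, compressed oops assumed, so a 16-byte array
// header): for a short[] of length 5 with elem_shift == 1:
//   16 + (5 << 1) == 26 bytes, rounded up to (26 + 7) & ~7 == 32 bytes
// with MinObjAlignmentInBytes == 8.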
4223 int round_mask = MinObjAlignmentInBytes - 1;
4224 Node* header_size = nullptr;
4225 // (T_BYTE has the weakest alignment and size restrictions...)
4226 if (layout_is_con) {
4227 int hsize = Klass::layout_helper_header_size(layout_con);
4228 int eshift = Klass::layout_helper_log2_element_size(layout_con);
4229 bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
4230 if ((round_mask & ~right_n_bits(eshift)) == 0)
4231 round_mask = 0; // strength-reduce it if it goes away completely
4232 assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
4233 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
4234 assert(header_size_min <= hsize, "generic minimum is smallest");
4235 header_size = intcon(hsize);
4236 } else {
4237 Node* hss = intcon(Klass::_lh_header_size_shift);
4238 Node* hsm = intcon(Klass::_lh_header_size_mask);
4239 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
4240 header_size = _gvn.transform(new AndINode(header_size, hsm));
4241 }
4242
4243 Node* elem_shift = nullptr;
4244 if (layout_is_con) {
4245 int eshift = Klass::layout_helper_log2_element_size(layout_con);
4246 if (eshift != 0)
4247 elem_shift = intcon(eshift);
4248 } else {
4249 // There is no need to mask or shift this value.
4250 // The semantics of LShiftINode include an implicit mask to 0x1F.
4251 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
4252 elem_shift = layout_val;
4299 }
4300 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
4301
4302 if (return_size_val != nullptr) {
4303 // This is the size
4304 (*return_size_val) = non_rounded_size;
4305 }
4306
4307 Node* size = non_rounded_size;
4308 if (round_mask != 0) {
4309 Node* mask1 = MakeConX(round_mask);
4310 size = _gvn.transform(new AddXNode(size, mask1));
4311 Node* mask2 = MakeConX(~round_mask);
4312 size = _gvn.transform(new AndXNode(size, mask2));
4313 }
4314 // else if round_mask == 0, the size computation is self-rounding
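// Worked example for the rounded case: with MinObjAlignmentInBytes == 8,
// round_mask == 7 and a non-rounded size of 26 becomes (26 + 7) & ~7 == 32.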
4315
4316 // Now generate allocation code
4317
4318 // The entire memory state is needed for slow path of the allocation
4319 // since GC and deoptimization can happen.
4320 Node* mem = reset_memory();
4321 set_all_memory(mem); // Create new memory state
4322
4323 if (initial_slow_test->is_Bool()) {
4324 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
4325 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
4326 }
4327
4328 const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
4329 const TypeOopPtr* ary_type = ary_klass->as_instance_type();
4330 const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
4331
4332 // TODO 8325106 Fix comment
4333 // Inline type array variants:
4334 // - null-ok:         MyValue.ref[] (ciObjArrayKlass "[LMyValue")
4335 // - null-free:       MyValue.val[] (ciObjArrayKlass "[QMyValue")
4336 // - null-free, flat: MyValue.val[] (ciFlatArrayKlass "[QMyValue")
4337 // Check if the array is a null-free, non-flat inline type array
4338 // that needs to be initialized with the default inline type.
4338 // that needs to be initialized with the default inline type.
4339 Node* default_value = nullptr;
4340 Node* raw_default_value = nullptr;
4341 if (ary_ptr != nullptr && ary_ptr->klass_is_exact()) {
4342 // Array type is known
4343 if (ary_ptr->is_null_free() && !ary_ptr->is_flat()) {
4344 ciInlineKlass* vk = ary_ptr->elem()->inline_klass();
4345 default_value = InlineTypeNode::default_oop(gvn(), vk);
4346 if (UseCompressedOops) {
4347 // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
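// The narrow oop is replicated into both halves so the array body can be
// initialized with 64-bit stores, each covering two 4-byte elements.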
4348 default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
4349 Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
4350 Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
4351 raw_default_value = _gvn.transform(new OrLNode(lower, upper));
4352 } else {
4353 raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
4354 }
4355 }
4356 }
4357
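// Lengths above the type-specific maximum must not be allocated on the
// fast path; max_array_length accounts for the header and element size.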
4358 Node* valid_length_test = _gvn.intcon(1);
4359 if (ary_ptr != nullptr) {
4360 BasicType bt = ary_ptr->elem()->array_element_basic_type();
4361 jint max = TypeAryPtr::max_array_length(bt);
4362 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
4363 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
4364 }
4365
4366 // Create the AllocateArrayNode and its result projections
4367 AllocateArrayNode* alloc
4368 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
4369 control(), mem, i_o(),
4370 size, klass_node,
4371 initial_slow_test,
4372 length, valid_length_test,
4373 default_value, raw_default_value);
4374 // Cast to correct type. Note that the klass_node may be constant or not,
4375 // and in the latter case the actual array type will be inexact also.
4376 // (This happens via a non-constant argument to inline_native_newArray.)
4377 // In any case, the value of klass_node provides the desired array type.
4378 const TypeInt* length_type = _gvn.find_int_type(length);
4379 if (ary_type->isa_aryptr() && length_type != nullptr) {
4380 // Try to get a better type than POS for the size
4381 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4382 }
4383
4384 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
4385
4386 array_ideal_length(alloc, ary_type, true);
4387 return javaoop;
4388 }
4389
4390 // The following "Ideal_foo" functions are placed here because they recognize
4391 // the graph shapes created by the functions immediately above.
4392
4393 //---------------------------Ideal_allocation----------------------------------
4500 set_all_memory(ideal.merged_memory());
4501 set_i_o(ideal.i_o());
4502 set_control(ideal.ctrl());
4503 }
4504
4505 void GraphKit::final_sync(IdealKit& ideal) {
4506 // Final sync IdealKit and graphKit.
4507 sync_kit(ideal);
4508 }
4509
4510 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4511 Node* len = load_array_length(load_String_value(str, set_ctrl));
4512 Node* coder = load_String_coder(str, set_ctrl);
4513 // Divide length by 2 if coder is UTF16
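// (coder is CODER_LATIN1 == 0 or CODER_UTF16 == 1, and value is a byte
// array, so the char count is the byte length shifted right by coder)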
4514 return _gvn.transform(new RShiftINode(len, coder));
4515 }
4516
4517 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4518 int value_offset = java_lang_String::value_offset();
4519 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4520 false, nullptr, Type::Offset(0));
4521 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4522 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4523 TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, false, true, true),
4524 ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
4525 Node* p = basic_plus_adr(str, str, value_offset);
4526 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4527 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4528 return load;
4529 }
4530
4531 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
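// With compact strings disabled, every String is UTF16-encoded, so the
// coder is a known constant.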
4532 if (!CompactStrings) {
4533 return intcon(java_lang_String::CODER_UTF16);
4534 }
4535 int coder_offset = java_lang_String::coder_offset();
4536 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4537 false, nullptr, Type::Offset(0));
4538 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4539
4540 Node* p = basic_plus_adr(str, str, coder_offset);
4541 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4542 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4543 return load;
4544 }
4545
4546 void GraphKit::store_String_value(Node* str, Node* value) {
4547 int value_offset = java_lang_String::value_offset();
4548 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4549 false, nullptr, Type::Offset(0));
4550 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4551
4552 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4553 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4554 }
4555
4556 void GraphKit::store_String_coder(Node* str, Node* value) {
4557 int coder_offset = java_lang_String::coder_offset();
4558 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4559 false, nullptr, Type::Offset(0));
4560 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4561
4562 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4563 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4564 }
4565
4566 // Capture src and dst memory state with a MergeMemNode
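// The merged state is typically used as the memory input of a node that
// reads the src slice and writes the dst slice, keeping both alias
// categories ordered with respect to it.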
4567 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4568 if (src_type == dst_type) {
4569 // Types are equal, we don't need a MergeMemNode
4570 return memory(src_type);
4571 }
4572 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4573 record_for_igvn(merge); // fold it up later, if possible
4574 int src_idx = C->get_alias_index(src_type);
4575 int dst_idx = C->get_alias_index(dst_type);
4576 merge->set_memory_at(src_idx, memory(src_idx));
4577 merge->set_memory_at(dst_idx, memory(dst_idx));
4578 return merge;
4579 }
4652 i_char->init_req(2, AddI(i_char, intcon(2)));
4653
4654 set_control(IfFalse(iff));
4655 set_memory(st, TypeAryPtr::BYTES);
4656 }
4657
4658 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4659 if (!field->is_constant()) {
4660 return nullptr; // Field not marked as constant.
4661 }
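// For a non-static field, folding additionally requires a constant holder
// object from which the field value can be read.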
4662 ciInstance* holder = nullptr;
4663 if (!field->is_static()) {
4664 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4665 if (const_oop != nullptr && const_oop->is_instance()) {
4666 holder = const_oop->as_instance();
4667 }
4668 }
4669 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4670 /*is_unsigned_load=*/false);
4671 if (con_type != nullptr) {
4672 Node* con = makecon(con_type);
4673 if (field->type()->is_inlinetype()) {
4674 con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
4675 } else if (con_type->is_inlinetypeptr()) {
4676 con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
4677 }
4678 return con;
4679 }
4680 return nullptr;
4681 }
4682
4683 //---------------------------load_mirror_from_klass----------------------------
4684 // Given a klass oop, load its java mirror (a java.lang.Class oop).
4685 Node* GraphKit::load_mirror_from_klass(Node* klass) {
4686 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
4687 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4688 // mirror = ((OopHandle)mirror)->resolve();
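// (Klass::_java_mirror is an OopHandle: the load above reads the handle
// as a raw address, and this IN_NATIVE load resolves the mirror oop.)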
4689 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
4690 }