6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/register.hpp"
27 #include "ci/ciObjArray.hpp"
28 #include "ci/ciUtilities.hpp"
29 #include "classfile/javaClasses.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/c2/barrierSetC2.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/graphKit.hpp"
39 #include "opto/idealKit.hpp"
40 #include "opto/intrinsicnode.hpp"
41 #include "opto/locknode.hpp"
42 #include "opto/machnode.hpp"
43 #include "opto/opaquenode.hpp"
44 #include "opto/parse.hpp"
45 #include "opto/rootnode.hpp"
46 #include "opto/runtime.hpp"
47 #include "opto/subtypenode.hpp"
48 #include "runtime/deoptimization.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "utilities/bitMap.inline.hpp"
51 #include "utilities/growableArray.hpp"
52 #include "utilities/powerOfTwo.hpp"
53
54 //----------------------------GraphKit-----------------------------------------
55 // Main utility constructor.
56 GraphKit::GraphKit(JVMState* jvms)
57 : Phase(Phase::Parser),
58 _env(C->env()),
59 _gvn(*C->initial_gvn()),
60 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
61 {
62 _exceptions = jvms->map()->next_exception();
63 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
64 set_jvms(jvms);
65 }
66
67 // Private constructor for parser.
68 GraphKit::GraphKit()
69 : Phase(Phase::Parser),
70 _env(C->env()),
71 _gvn(*C->initial_gvn()),
72 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
73 {
74 _exceptions = nullptr;
75 set_map(nullptr);
76 debug_only(_sp = -99);
77 debug_only(set_bci(-99));
78 }
79
80
81
82 //---------------------------clean_stack---------------------------------------
83 // Clear away rubbish from the stack area of the JVM state.
84 // This destroys any arguments that may be waiting on the stack.
840 if (PrintMiscellaneous && (Verbose || WizardMode)) {
841 tty->print_cr("Zombie local %d: ", local);
842 jvms->dump();
843 }
844 return false;
845 }
846 }
847 }
848 return true;
849 }
850
851 #endif //ASSERT
852
853 // Helper function for deciding whether certain bytecodes must be reexecuted if deoptimization happens.
854 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
855 ciMethod* cur_method = jvms->method();
856 int cur_bci = jvms->bci();
857 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
858 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
859 return Interpreter::bytecode_should_reexecute(code) ||
860 (is_anewarray && code == Bytecodes::_multianewarray);
861 // Reexecute _multianewarray bytecode which was replaced with
862 // sequence of [a]newarray. See Parse::do_multianewarray().
863 //
864     // Note: the interpreter should not have this set, since this optimization
865     // is limited by dimensions and guarded by a flag; in some cases
866     // multianewarray() runtime calls will be generated and
867     // the bytecode should not be reexecuted (stack will not be reset).
868 } else {
869 return false;
870 }
871 }
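// Illustrative sketch (hypothetical source, not from this file): for
//   Object[][] a = new Object[2][3];   // javac emits multianewarray
// Parse::do_multianewarray() may replace the bytecode with a sequence of
// [a]newarray allocations; if we deoptimize in the middle of that sequence,
// the whole multianewarray bytecode must be reexecuted by the interpreter,
// which is what the check above arranges.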
872
873 // Helper function for adding JVMState and debug information to a node
874 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
875 // Add the safepoint edges to the call (or other safepoint).
876
877 // Make sure dead locals are set to top. This
878 // should help register allocation time and cut down on the size
879 // of the deoptimization information.
880 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
931 }
932
933 // Presize the call:
934 DEBUG_ONLY(uint non_debug_edges = call->req());
935 call->add_req_batch(top(), youngest_jvms->debug_depth());
936 assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
937
938 // Set up edges so that the call looks like this:
939 // Call [state:] ctl io mem fptr retadr
940 // [parms:] parm0 ... parmN
941 // [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
942 // [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
943 // [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
944 // Note that caller debug info precedes callee debug info.
945
946 // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
947 uint debug_ptr = call->req();
948
949 // Loop over the map input edges associated with jvms, add them
950 // to the call node, & reset all offsets to match call node array.
951 for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
952 uint debug_end = debug_ptr;
953 uint debug_start = debug_ptr - in_jvms->debug_size();
954 debug_ptr = debug_start; // back up the ptr
955
956 uint p = debug_start; // walks forward in [debug_start, debug_end)
957 uint j, k, l;
958 SafePointNode* in_map = in_jvms->map();
959 out_jvms->set_map(call);
960
961 if (can_prune_locals) {
962 assert(in_jvms->method() == out_jvms->method(), "sanity");
963 // If the current throw can reach an exception handler in this JVMS,
964 // then we must keep everything live that can reach that handler.
965 // As a quick and dirty approximation, we look for any handlers at all.
966 if (in_jvms->method()->has_exception_handlers()) {
967 can_prune_locals = false;
968 }
969 }
970
971 // Add the Locals
972 k = in_jvms->locoff();
973 l = in_jvms->loc_size();
974 out_jvms->set_locoff(p);
975 if (!can_prune_locals) {
976 for (j = 0; j < l; j++)
977 call->set_req(p++, in_map->in(k+j));
978 } else {
979 p += l; // already set to top above by add_req_batch
980 }
981
982 // Add the Expression Stack
983 k = in_jvms->stkoff();
984 l = in_jvms->sp();
985 out_jvms->set_stkoff(p);
986 if (!can_prune_locals) {
987 for (j = 0; j < l; j++)
988 call->set_req(p++, in_map->in(k+j));
989 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
990       // Split the stack into {S0, S1}: the bottom S0 slots stay top (pruned); the top S1 slots are copied (kept live).
991 uint s1 = stack_slots_not_pruned;
992 stack_slots_not_pruned = 0; // for next iteration
993 if (s1 > l) s1 = l;
994 uint s0 = l - s1;
995 p += s0; // skip the tops preinstalled by add_req_batch
996 for (j = s0; j < l; j++)
997 call->set_req(p++, in_map->in(k+j));
998 } else {
999 p += l; // already set to top above by add_req_batch
1000 }
1001
1002 // Add the Monitors
1003 k = in_jvms->monoff();
1004 l = in_jvms->mon_size();
1005 out_jvms->set_monoff(p);
1006 for (j = 0; j < l; j++)
1007 call->set_req(p++, in_map->in(k+j));
1008
1009 // Copy any scalar object fields.
1010 k = in_jvms->scloff();
1011 l = in_jvms->scl_size();
1012 out_jvms->set_scloff(p);
1013 for (j = 0; j < l; j++)
1014 call->set_req(p++, in_map->in(k+j));
1015
1016 // Finish the new jvms.
1017 out_jvms->set_endoff(p);
1018
1019 assert(out_jvms->endoff() == debug_end, "fill ptr must match");
1020 assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
1021 assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
1022 assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
1023 assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
1024 assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
1025
1026 // Update the two tail pointers in parallel.
1027 out_jvms = out_jvms->caller();
1028 in_jvms = in_jvms->caller();
1029 }
1030
1031 assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1032
1033 // Test the correctness of JVMState::debug_xxx accessors:
1034 assert(call->jvms()->debug_start() == non_debug_edges, "");
1035 assert(call->jvms()->debug_end() == call->req(), "");
1036 assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1037 }
1038
1039 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
1040 Bytecodes::Code code = java_bc();
1041 if (code == Bytecodes::_wide) {
1042 code = method()->java_code_at_bci(bci() + 1);
1043 }
1044
1045 if (code != Bytecodes::_illegal) {
1046 depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1182 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1183 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1184 return _gvn.transform( new AndLNode(conv, mask) );
1185 }
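// Worked example (illustrative): treating the int as unsigned, an input of -1
// (bit pattern 0xFFFFFFFF) is sign-extended by ConvI2L to 0xFFFFFFFFFFFFFFFF
// and then masked with max_juint (0x00000000FFFFFFFF), yielding the unsigned
// value 4294967295L rather than -1L.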
1186
1187 Node* GraphKit::ConvL2I(Node* offset) {
1188 // short-circuit a common case
1189 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1190 if (offset_con != (jlong)Type::OffsetBot) {
1191 return intcon((int) offset_con);
1192 }
1193 return _gvn.transform( new ConvL2INode(offset));
1194 }
1195
1196 //-------------------------load_object_klass-----------------------------------
1197 Node* GraphKit::load_object_klass(Node* obj) {
1198 // Special-case a fresh allocation to avoid building nodes:
1199 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1200 if (akls != nullptr) return akls;
1201 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1202 return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS));
1203 }
1204
1205 //-------------------------load_array_length-----------------------------------
1206 Node* GraphKit::load_array_length(Node* array) {
1207 // Special-case a fresh allocation to avoid building nodes:
1208 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1209 Node *alen;
1210 if (alloc == nullptr) {
1211 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1212 alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1213 } else {
1214 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1215 }
1216 return alen;
1217 }
1218
1219 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1220 const TypeOopPtr* oop_type,
1221 bool replace_length_in_map) {
1222 Node* length = alloc->Ideal_length();
1231 replace_in_map(length, ccast);
1232 }
1233 return ccast;
1234 }
1235 }
1236 return length;
1237 }
1238
1239 //------------------------------do_null_check----------------------------------
1240 // Helper function to do a null pointer check. The returned value is
1241 // the incoming address with null cast away. You are allowed to use the
1242 // not-null value only if you are control dependent on the test.
1243 #ifndef PRODUCT
1244 extern uint explicit_null_checks_inserted,
1245 explicit_null_checks_elided;
1246 #endif
1247 Node* GraphKit::null_check_common(Node* value, BasicType type,
1248 // optional arguments for variations:
1249 bool assert_null,
1250 Node* *null_control,
1251 bool speculative) {
1252 assert(!assert_null || null_control == nullptr, "not both at once");
1253 if (stopped()) return top();
1254 NOT_PRODUCT(explicit_null_checks_inserted++);
1255
1256 // Construct null check
1257 Node *chk = nullptr;
1258 switch(type) {
1259 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1260 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1261 case T_ARRAY : // fall through
1262 type = T_OBJECT; // simplify further tests
1263 case T_OBJECT : {
1264 const Type *t = _gvn.type( value );
1265
1266 const TypeOopPtr* tp = t->isa_oopptr();
1267 if (tp != nullptr && !tp->is_loaded()
1268 // Only for do_null_check, not any of its siblings:
1269 && !assert_null && null_control == nullptr) {
1270 // Usually, any field access or invocation on an unloaded oop type
1271 // will simply fail to link, since the statically linked class is
1272 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1273 // the static class is loaded but the sharper oop type is not.
1274 // Rather than checking for this obscure case in lots of places,
1275 // we simply observe that a null check on an unloaded class
1339 }
1340 Node *oldcontrol = control();
1341 set_control(cfg);
1342 Node *res = cast_not_null(value);
1343 set_control(oldcontrol);
1344 NOT_PRODUCT(explicit_null_checks_elided++);
1345 return res;
1346 }
1347 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1348 if (cfg == nullptr) break; // Quit at region nodes
1349 depth++;
1350 }
1351 }
1352
1353 //-----------
1354 // Branch to failure if null
1355 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1356 Deoptimization::DeoptReason reason;
1357 if (assert_null) {
1358 reason = Deoptimization::reason_null_assert(speculative);
1359 } else if (type == T_OBJECT) {
1360 reason = Deoptimization::reason_null_check(speculative);
1361 } else {
1362 reason = Deoptimization::Reason_div0_check;
1363 }
1364 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1365 // ciMethodData::has_trap_at will return a conservative -1 if any
1366 // must-be-null assertion has failed. This could cause performance
1367 // problems for a method after its first do_null_assert failure.
1368 // Consider using 'Reason_class_check' instead?
1369
1370 // To cause an implicit null check, we set the not-null probability
1371 // to the maximum (PROB_MAX). For an explicit check the probability
1372 // is set to a smaller value.
1373 if (null_control != nullptr || too_many_traps(reason)) {
1374 // probability is less likely
1375 ok_prob = PROB_LIKELY_MAG(3);
1376 } else if (!assert_null &&
1377 (ImplicitNullCheckThreshold > 0) &&
1378 method() != nullptr &&
1379 (method()->method_data()->trap_count(reason)
1413 }
1414
1415 if (assert_null) {
1416 // Cast obj to null on this path.
1417 replace_in_map(value, zerocon(type));
1418 return zerocon(type);
1419 }
1420
1421 // Cast obj to not-null on this path, if there is no null_control.
1422 // (If there is a null_control, a non-null value may come back to haunt us.)
1423 if (type == T_OBJECT) {
1424 Node* cast = cast_not_null(value, false);
1425 if (null_control == nullptr || (*null_control) == top())
1426 replace_in_map(value, cast);
1427 value = cast;
1428 }
1429
1430 return value;
1431 }
1432
1433
1434 //------------------------------cast_not_null----------------------------------
1435 // Cast obj to not-null on this path
1436 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1437 const Type *t = _gvn.type(obj);
1438 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1439 // Object is already not-null?
1440 if( t == t_not_null ) return obj;
1441
1442 Node* cast = new CastPPNode(control(), obj,t_not_null);
1443 cast = _gvn.transform( cast );
1444
1445 // Scan for instances of 'obj' in the current JVM mapping.
1446 // These instances are known to be not-null after the test.
1447 if (do_replace_in_map)
1448 replace_in_map(obj, cast);
1449
1450 return cast; // Return casted value
1451 }
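// Illustrative sketch (assumed example types): if 'obj' currently has a
// possibly-null instance type, joining it with TypePtr::NOTNULL yields the
// same klass with a NotNull pointer kind, so uses dominated by this control
// can drop redundant null checks once the map is updated.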
1452
1453 // Sometimes in intrinsics, we implicitly know an object is not null
1454 // (there's no actual null check) so we can cast it to not null. In
1455 // the course of optimizations, the input to the cast can become null.
1456 // In that case that data path will die and we need the control path
1542 // These are layered on top of the factory methods in LoadNode and StoreNode,
1543 // and integrate with the parser's memory state and _gvn engine.
1544 //
1545
1546 // factory methods in "int adr_idx"
1547 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1548 int adr_idx,
1549 MemNode::MemOrd mo,
1550 LoadNode::ControlDependency control_dependency,
1551 bool require_atomic_access,
1552 bool unaligned,
1553 bool mismatched,
1554 bool unsafe,
1555 uint8_t barrier_data) {
1556 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1557 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1558 debug_only(adr_type = C->get_adr_type(adr_idx));
1559 Node* mem = memory(adr_idx);
1560 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1561 ld = _gvn.transform(ld);
1562 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1563 // Improve graph before escape analysis and boxing elimination.
1564 record_for_igvn(ld);
1565 if (ld->is_DecodeN()) {
1566 // Also record the actual load (LoadN) in case ld is DecodeN
1567 assert(ld->in(1)->Opcode() == Op_LoadN, "Assumption invalid: input to DecodeN is not LoadN");
1568 record_for_igvn(ld->in(1));
1569 }
1570 }
1571 return ld;
1572 }
1573
1574 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1575 int adr_idx,
1576 MemNode::MemOrd mo,
1577 bool require_atomic_access,
1578 bool unaligned,
1579 bool mismatched,
1580 bool unsafe,
1581 int barrier_data) {
1593 if (unsafe) {
1594 st->as_Store()->set_unsafe_access();
1595 }
1596 st->as_Store()->set_barrier_data(barrier_data);
1597 st = _gvn.transform(st);
1598 set_memory(st, adr_idx);
1599 // Back-to-back stores can only remove intermediate store with DU info
1600 // so push on worklist for optimizer.
1601 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1602 record_for_igvn(st);
1603
1604 return st;
1605 }
1606
1607 Node* GraphKit::access_store_at(Node* obj,
1608 Node* adr,
1609 const TypePtr* adr_type,
1610 Node* val,
1611 const Type* val_type,
1612 BasicType bt,
1613 DecoratorSet decorators) {
1614 // Transformation of a value which could be null pointer (CastPP #null)
1615 // could be delayed during Parse (for example, in adjust_map_after_if()).
1616 // Execute transformation here to avoid barrier generation in such case.
1617 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1618 val = _gvn.makecon(TypePtr::NULL_PTR);
1619 }
1620
1621 if (stopped()) {
1622 return top(); // Dead path ?
1623 }
1624
1625 assert(val != nullptr, "not dead path");
1626
1627 C2AccessValuePtr addr(adr, adr_type);
1628 C2AccessValue value(val, val_type);
1629 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1630 if (access.is_raw()) {
1631 return _barrier_set->BarrierSetC2::store_at(access, value);
1632 } else {
1633 return _barrier_set->store_at(access, value);
1634 }
1635 }
1636
1637 Node* GraphKit::access_load_at(Node* obj, // containing obj
1638                                Node* adr,   // actual address to load val from
1639 const TypePtr* adr_type,
1640 const Type* val_type,
1641 BasicType bt,
1642 DecoratorSet decorators) {
1643 if (stopped()) {
1644 return top(); // Dead path ?
1645 }
1646
1647 C2AccessValuePtr addr(adr, adr_type);
1648 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
1649 if (access.is_raw()) {
1650 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1651 } else {
1652 return _barrier_set->load_at(access, val_type);
1653 }
1654 }
1655
1656 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1657 const Type* val_type,
1658 BasicType bt,
1659 DecoratorSet decorators) {
1660 if (stopped()) {
1661 return top(); // Dead path ?
1662 }
1663
1664 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1665 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1666 if (access.is_raw()) {
1667 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1668 } else {
1733 Node* new_val,
1734 const Type* value_type,
1735 BasicType bt,
1736 DecoratorSet decorators) {
1737 C2AccessValuePtr addr(adr, adr_type);
1738 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1739 if (access.is_raw()) {
1740 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1741 } else {
1742 return _barrier_set->atomic_add_at(access, new_val, value_type);
1743 }
1744 }
1745
1746 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1747 return _barrier_set->clone(this, src, dst, size, is_array);
1748 }
1749
1750 //-------------------------array_element_address-------------------------
1751 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1752 const TypeInt* sizetype, Node* ctrl) {
1753 uint shift = exact_log2(type2aelembytes(elembt));
1754 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1755
1756 // short-circuit a common case (saves lots of confusing waste motion)
1757 jint idx_con = find_int_con(idx, -1);
1758 if (idx_con >= 0) {
1759 intptr_t offset = header + ((intptr_t)idx_con << shift);
1760 return basic_plus_adr(ary, offset);
1761 }
1762
1763 // must be correct type for alignment purposes
1764 Node* base = basic_plus_adr(ary, header);
1765 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1766 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1767 return basic_plus_adr(ary, base, scale);
1768 }
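// Worked example (illustrative; header size is platform and layout dependent):
// for an int[] element, elembt == T_INT gives shift == 2, so element i lives at
//   ary + header + (i << 2)
// e.g. index 5 with a 16-byte base offset resolves to ary + 16 + 20 = ary + 36.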
1769
1770 //-------------------------load_array_element-------------------------
1771 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1772 const Type* elemtype = arytype->elem();
1773 BasicType elembt = elemtype->array_element_basic_type();
1774 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1775 if (elembt == T_NARROWOOP) {
1776 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1777 }
1778 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1779 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1780 return ld;
1781 }
1782
1783 //-------------------------set_arguments_for_java_call-------------------------
1784 // Arguments (pre-popped from the stack) are taken from the JVMS.
1785 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1786 // Add the call arguments:
1787 uint nargs = call->method()->arg_size();
1788 for (uint i = 0; i < nargs; i++) {
1789 Node* arg = argument(i);
1790 call->init_req(i + TypeFunc::Parms, arg);
1791 }
1792 }
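// Illustrative note (assumed example call): for a virtual call to
// String.charAt(int), arg_size() is 2, so argument(0) (the receiver) and
// argument(1) (the int index) are wired to TypeFunc::Parms + 0 and + 1.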
1793
1794 //---------------------------set_edges_for_java_call---------------------------
1795 // Connect a newly created call into the current JVMS.
1796 // (The call's return value, if any, is retrieved later via set_results_for_java_call.)
1797 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1798
1799 // Add the predefined inputs:
1800 call->init_req( TypeFunc::Control, control() );
1801 call->init_req( TypeFunc::I_O , i_o() );
1802 call->init_req( TypeFunc::Memory , reset_memory() );
1803 call->init_req( TypeFunc::FramePtr, frameptr() );
1804 call->init_req( TypeFunc::ReturnAdr, top() );
1805
1806 add_safepoint_edges(call, must_throw);
1807
1808 Node* xcall = _gvn.transform(call);
1809
1810 if (xcall == top()) {
1811 set_control(top());
1812 return;
1813 }
1814 assert(xcall == call, "call identity is stable");
1815
1816 // Re-use the current map to produce the result.
1817
1818 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1819 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1820 set_all_memory_call(xcall, separate_io_proj);
1821
1822 //return xcall; // no need, caller already has it
1823 }
1824
1825 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1826 if (stopped()) return top(); // maybe the call folded up?
1827
1828 // Capture the return value, if any.
1829 Node* ret;
1830 if (call->method() == nullptr ||
1831 call->method()->return_type()->basic_type() == T_VOID)
1832 ret = top();
1833 else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1834
1835 // Note: Since any out-of-line call can produce an exception,
1836 // we always insert an I_O projection from the call into the result.
1837
1838 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1839
1840 if (separate_io_proj) {
1841 // The caller requested separate projections be used by the fall
1842 // through and exceptional paths, so replace the projections for
1843 // the fall through path.
1844 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1845 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1846 }
1847 return ret;
1848 }
1849
1850 //--------------------set_predefined_input_for_runtime_call--------------------
1851 // Reading and setting the memory state is way conservative here.
1852 // The real problem is that I am not doing real Type analysis on memory,
1853 // so I cannot distinguish card mark stores from other stores. Across a GC
1854 // point the Store Barrier and the card mark memory has to agree. I cannot
1855 // have a card mark store and its barrier split across the GC point from
1856 // either above or below. Here I get that to happen by reading ALL of memory.
1857 // A better answer would be to separate out card marks from other memory.
1858 // For now, return the input memory state, so that it can be reused
1859 // after the call, if this call has restricted memory effects.
1860 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1861 // Set fixed predefined input arguments
1862 Node* memory = reset_memory();
1863 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
1864 call->init_req( TypeFunc::Control, control() );
1865 call->init_req( TypeFunc::I_O, top() ); // does no i/o
1866 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
1917 if (use->is_MergeMem()) {
1918 wl.push(use);
1919 }
1920 }
1921 }
1922
1923 // Replace the call with the current state of the kit.
1924 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
1925 JVMState* ejvms = nullptr;
1926 if (has_exceptions()) {
1927 ejvms = transfer_exceptions_into_jvms();
1928 }
1929
1930 ReplacedNodes replaced_nodes = map()->replaced_nodes();
1931 ReplacedNodes replaced_nodes_exception;
1932 Node* ex_ctl = top();
1933
1934 SafePointNode* final_state = stop();
1935
1936 // Find all the needed outputs of this call
1937 CallProjections callprojs;
1938 call->extract_projections(&callprojs, true);
1939
1940 Unique_Node_List wl;
1941 Node* init_mem = call->in(TypeFunc::Memory);
1942 Node* final_mem = final_state->in(TypeFunc::Memory);
1943 Node* final_ctl = final_state->in(TypeFunc::Control);
1944 Node* final_io = final_state->in(TypeFunc::I_O);
1945
1946 // Replace all the old call edges with the edges from the inlining result
1947 if (callprojs.fallthrough_catchproj != nullptr) {
1948 C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
1949 }
1950 if (callprojs.fallthrough_memproj != nullptr) {
1951 if (final_mem->is_MergeMem()) {
1952       // The parser's exit MergeMem was not transformed but may be optimized
1953 final_mem = _gvn.transform(final_mem);
1954 }
1955 C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
1956 add_mergemem_users_to_worklist(wl, final_mem);
1957 }
1958 if (callprojs.fallthrough_ioproj != nullptr) {
1959 C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
1960 }
1961
1962 // Replace the result with the new result if it exists and is used
1963 if (callprojs.resproj != nullptr && result != nullptr) {
1964 C->gvn_replace_by(callprojs.resproj, result);
1965 }
1966
1967 if (ejvms == nullptr) {
1968     // No exception edges, so simply kill off the exception paths
1969 if (callprojs.catchall_catchproj != nullptr) {
1970 C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
1971 }
1972 if (callprojs.catchall_memproj != nullptr) {
1973 C->gvn_replace_by(callprojs.catchall_memproj, C->top());
1974 }
1975 if (callprojs.catchall_ioproj != nullptr) {
1976 C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
1977 }
1978 // Replace the old exception object with top
1979 if (callprojs.exobj != nullptr) {
1980 C->gvn_replace_by(callprojs.exobj, C->top());
1981 }
1982 } else {
1983 GraphKit ekit(ejvms);
1984
1985 // Load my combined exception state into the kit, with all phis transformed:
1986 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
1987 replaced_nodes_exception = ex_map->replaced_nodes();
1988
1989 Node* ex_oop = ekit.use_exception_state(ex_map);
1990
1991 if (callprojs.catchall_catchproj != nullptr) {
1992 C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
1993 ex_ctl = ekit.control();
1994 }
1995 if (callprojs.catchall_memproj != nullptr) {
1996 Node* ex_mem = ekit.reset_memory();
1997 C->gvn_replace_by(callprojs.catchall_memproj, ex_mem);
1998 add_mergemem_users_to_worklist(wl, ex_mem);
1999 }
2000 if (callprojs.catchall_ioproj != nullptr) {
2001 C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
2002 }
2003
2004 // Replace the old exception object with the newly created one
2005 if (callprojs.exobj != nullptr) {
2006 C->gvn_replace_by(callprojs.exobj, ex_oop);
2007 }
2008 }
2009
2010 // Disconnect the call from the graph
2011 call->disconnect_inputs(C);
2012 C->gvn_replace_by(call, C->top());
2013
2014 // Clean up any MergeMems that feed other MergeMems since the
2015 // optimizer doesn't like that.
2016 while (wl.size() > 0) {
2017 _gvn.transform(wl.pop());
2018 }
2019
2020 if (callprojs.fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2021 replaced_nodes.apply(C, final_ctl);
2022 }
2023 if (!ex_ctl->is_top() && do_replaced_nodes) {
2024 replaced_nodes_exception.apply(C, ex_ctl);
2025 }
2026 }
2027
2028
2029 //------------------------------increment_counter------------------------------
2030 // for statistics: increment a VM counter by 1
2031
2032 void GraphKit::increment_counter(address counter_addr) {
2033 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2034 increment_counter(adr1);
2035 }
2036
2037 void GraphKit::increment_counter(Node* counter_addr) {
2038 int adr_type = Compile::AliasIdxRaw;
2039 Node* ctrl = control();
2040 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
2199 *
2200 * @param n node that the type applies to
2201 * @param exact_kls type from profiling
2202  * @param ptr_kind   did profiling see null?
2203 *
2204 * @return node with improved type
2205 */
2206 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2207 const Type* current_type = _gvn.type(n);
2208 assert(UseTypeSpeculation, "type speculation must be on");
2209
2210 const TypePtr* speculative = current_type->speculative();
2211
2212 // Should the klass from the profile be recorded in the speculative type?
2213 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2214 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2215 const TypeOopPtr* xtype = tklass->as_instance_type();
2216 assert(xtype->klass_is_exact(), "Should be exact");
2217 // Any reason to believe n is not null (from this profiling or a previous one)?
2218 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2219 const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2220 // record the new speculative type's depth
2221 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2222 speculative = speculative->with_inline_depth(jvms()->depth());
2223 } else if (current_type->would_improve_ptr(ptr_kind)) {
2224     // Profiling reports that null was never seen, so we can change the
2225     // speculative type to a non-null ptr.
2226 if (ptr_kind == ProfileAlwaysNull) {
2227 speculative = TypePtr::NULL_PTR;
2228 } else {
2229 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2230 const TypePtr* ptr = TypePtr::NOTNULL;
2231 if (speculative != nullptr) {
2232 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2233 } else {
2234 speculative = ptr;
2235 }
2236 }
2237 }
2238
2239 if (speculative != current_type->speculative()) {
2240 // Build a type with a speculative type (what we think we know
2241 // about the type but will need a guard when we use it)
2242 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
2243 // We're changing the type, we need a new CheckCast node to carry
2244 // the new type. The new type depends on the control: what
2245 // profiling tells us is only valid from here as far as we can
2246 // tell.
2247 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2248 cast = _gvn.transform(cast);
2249 replace_in_map(n, cast);
2250 n = cast;
2251 }
2252
2253 return n;
2254 }
2255
2256 /**
2257 * Record profiling data from receiver profiling at an invoke with the
2258 * type system so that it can propagate it (speculation)
2259 *
2260 * @param n receiver node
2261 *
2262 * @return node with improved type
2263 */
2264 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2265 if (!UseTypeSpeculation) {
2266 return n;
2267 }
2268 ciKlass* exact_kls = profile_has_unique_klass();
2269 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2270 if ((java_bc() == Bytecodes::_checkcast ||
2271 java_bc() == Bytecodes::_instanceof ||
2272 java_bc() == Bytecodes::_aastore) &&
2273 method()->method_data()->is_mature()) {
2274 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2275 if (data != nullptr) {
2276 if (!data->as_BitData()->null_seen()) {
2277 ptr_kind = ProfileNeverNull;
2278 } else {
2279 assert(data->is_ReceiverTypeData(), "bad profile data type");
2280 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2281 uint i = 0;
2282 for (; i < call->row_limit(); i++) {
2283 ciKlass* receiver = call->receiver(i);
2284 if (receiver != nullptr) {
2285 break;
2286 }
2287 }
2288 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2289 }
2290 }
2291 }
2292 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2293 }
2294
2295 /**
2296 * Record profiling data from argument profiling at an invoke with the
2297 * type system so that it can propagate it (speculation)
2298 *
2299 * @param dest_method target method for the call
2300 * @param bc what invoke bytecode is this?
2301 */
2302 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2303 if (!UseTypeSpeculation) {
2304 return;
2305 }
2306 const TypeFunc* tf = TypeFunc::make(dest_method);
2307 int nargs = tf->domain()->cnt() - TypeFunc::Parms;
2308 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2309 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2310 const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2311 if (is_reference_type(targ->basic_type())) {
2312 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2313 ciKlass* better_type = nullptr;
2314 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2315 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2316 }
2317 i++;
2318 }
2319 }
2320 }
2321
2322 /**
2323 * Record profiling data from parameter profiling at an invoke with
2324 * the type system so that it can propagate it (speculation)
2325 */
2326 void GraphKit::record_profiled_parameters_for_speculation() {
2327 if (!UseTypeSpeculation) {
2328 return;
2329 }
2330 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2344 * the type system so that it can propagate it (speculation)
2345 */
2346 void GraphKit::record_profiled_return_for_speculation() {
2347 if (!UseTypeSpeculation) {
2348 return;
2349 }
2350 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2351 ciKlass* better_type = nullptr;
2352 if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2353 // If profiling reports a single type for the return value,
2354 // feed it to the type system so it can propagate it as a
2355 // speculative type
2356 record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2357 }
2358 }
2359
2360 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2361 if (Matcher::strict_fp_requires_explicit_rounding) {
2362 // (Note: TypeFunc::make has a cache that makes this fast.)
2363 const TypeFunc* tf = TypeFunc::make(dest_method);
2364 int nargs = tf->domain()->cnt() - TypeFunc::Parms;
2365 for (int j = 0; j < nargs; j++) {
2366 const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2367 if (targ->basic_type() == T_DOUBLE) {
2368 // If any parameters are doubles, they must be rounded before
2369         // the call; dprecision_rounding does the gvn.transform
2370 Node *arg = argument(j);
2371 arg = dprecision_rounding(arg);
2372 set_argument(j, arg);
2373 }
2374 }
2375 }
2376 }
2377
2378 // rounding for strict float precision conformance
2379 Node* GraphKit::precision_rounding(Node* n) {
2380 if (Matcher::strict_fp_requires_explicit_rounding) {
2381 #ifdef IA32
2382 if (UseSSE == 0) {
2383 return _gvn.transform(new RoundFloatNode(0, n));
2384 }
2385 #else
2386 Unimplemented();
2495 // The first null ends the list.
2496 Node* parm0, Node* parm1,
2497 Node* parm2, Node* parm3,
2498 Node* parm4, Node* parm5,
2499 Node* parm6, Node* parm7) {
2500 assert(call_addr != nullptr, "must not call null targets");
2501
2502 // Slow-path call
2503 bool is_leaf = !(flags & RC_NO_LEAF);
2504 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2505 if (call_name == nullptr) {
2506 assert(!is_leaf, "must supply name for leaf");
2507 call_name = OptoRuntime::stub_name(call_addr);
2508 }
2509 CallNode* call;
2510 if (!is_leaf) {
2511 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2512 } else if (flags & RC_NO_FP) {
2513 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2514 } else if (flags & RC_VECTOR){
2515 uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2516 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2517 } else {
2518 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2519 }
2520
2521 // The following is similar to set_edges_for_java_call,
2522 // except that the memory effects of the call are restricted to AliasIdxRaw.
2523
2524 // Slow path call has no side-effects, uses few values
2525 bool wide_in = !(flags & RC_NARROW_MEM);
2526 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2527
2528 Node* prev_mem = nullptr;
2529 if (wide_in) {
2530 prev_mem = set_predefined_input_for_runtime_call(call);
2531 } else {
2532 assert(!wide_out, "narrow in => narrow out");
2533 Node* narrow_mem = memory(adr_type);
2534 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2535 }
2575
2576 if (has_io) {
2577 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2578 }
2579 return call;
2580
2581 }
2582
2583 // i2b
2584 Node* GraphKit::sign_extend_byte(Node* in) {
2585 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2586 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2587 }
2588
2589 // i2s
2590 Node* GraphKit::sign_extend_short(Node* in) {
2591 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2592 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2593 }
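// Worked example (illustrative): sign_extend_byte(0x000000FF) computes
// (0xFF << 24) == 0xFF000000, and the arithmetic >> 24 smears the sign bit,
// giving 0xFFFFFFFF == -1, while 0x0000007F comes back unchanged as 127.
// sign_extend_short works the same way with 16-bit shifts.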
2594
2595 //------------------------------merge_memory-----------------------------------
2596 // Merge memory from one path into the current memory state.
2597 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2598 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2599 Node* old_slice = mms.force_memory();
2600 Node* new_slice = mms.memory2();
2601 if (old_slice != new_slice) {
2602 PhiNode* phi;
2603 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2604 if (mms.is_empty()) {
2605 // clone base memory Phi's inputs for this memory slice
2606 assert(old_slice == mms.base_memory(), "sanity");
2607 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2608 _gvn.set_type(phi, Type::MEMORY);
2609 for (uint i = 1; i < phi->req(); i++) {
2610 phi->init_req(i, old_slice->in(i));
2611 }
2612 } else {
2613 phi = old_slice->as_Phi(); // Phi was generated already
2614 }
2690
2691 // Fast check for identical types, perhaps identical constants.
2692 // The types can even be identical non-constants, in cases
2693 // involving Array.newInstance, Object.clone, etc.
2694 if (subklass == superklass)
2695 return C->top(); // false path is dead; no test needed.
2696
2697 if (gvn.type(superklass)->singleton()) {
2698 const TypeKlassPtr* superk = gvn.type(superklass)->is_klassptr();
2699 const TypeKlassPtr* subk = gvn.type(subklass)->is_klassptr();
2700
2701 // In the common case of an exact superklass, try to fold up the
2702 // test before generating code. You may ask, why not just generate
2703 // the code and then let it fold up? The answer is that the generated
2704 // code will necessarily include null checks, which do not always
2705 // completely fold away. If they are also needless, then they turn
2706 // into a performance loss. Example:
2707 // Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
2708 // Here, the type of 'fa' is often exact, so the store check
2709 // of fa[1]=x will fold up, without testing the nullness of x.
2710 switch (C->static_subtype_check(superk, subk)) {
2711 case Compile::SSC_always_false:
2712 {
2713 Node* always_fail = *ctrl;
2714 *ctrl = gvn.C->top();
2715 return always_fail;
2716 }
2717 case Compile::SSC_always_true:
2718 return C->top();
2719 case Compile::SSC_easy_test:
2720 {
2721 // Just do a direct pointer compare and be done.
2722 IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
2723 *ctrl = gvn.transform(new IfTrueNode(iff));
2724 return gvn.transform(new IfFalseNode(iff));
2725 }
2726 case Compile::SSC_full_test:
2727 break;
2728 default:
2729 ShouldNotReachHere();
2871
2872 // Now do a linear scan of the secondary super-klass array. Again, no real
2873 // performance impact (too rare) but it's gotta be done.
2874 // Since the code is rarely used, there is no penalty for moving it
2875 // out of line, and it can only improve I-cache density.
2876 // The decision to inline or out-of-line this final check is platform
2877 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
2878 Node* psc = gvn.transform(
2879 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
2880
2881 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
2882 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
2883 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
2884
2885 // Return false path; set default control to true path.
2886 *ctrl = gvn.transform(r_ok_subtype);
2887 return gvn.transform(r_not_subtype);
2888 }
2889
2890 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
2891 bool expand_subtype_check = C->post_loop_opts_phase() || // macro node expansion is over
2892 ExpandSubTypeCheckAtParseTime; // forced expansion
2893 if (expand_subtype_check) {
2894 MergeMemNode* mem = merged_memory();
2895 Node* ctrl = control();
2896 Node* subklass = obj_or_subklass;
2897 if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
2898 subklass = load_object_klass(obj_or_subklass);
2899 }
2900
2901 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
2902 set_control(ctrl);
2903 return n;
2904 }
2905
2906 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
2907 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
2908 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2909 set_control(_gvn.transform(new IfTrueNode(iff)));
2910 return _gvn.transform(new IfFalseNode(iff));
2911 }
2912
2913 // Profile-driven exact type check:
2914 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
2915 float prob,
2916 Node* *casted_receiver) {
2917 assert(!klass->is_interface(), "no exact type check on interfaces");
2918
2919 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
2920 Node* recv_klass = load_object_klass(receiver);
2921 Node* want_klass = makecon(tklass);
2922 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
2923 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
2924 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2925 set_control( _gvn.transform(new IfTrueNode (iff)));
2926 Node* fail = _gvn.transform(new IfFalseNode(iff));
2927
2928 if (!stopped()) {
2929 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
2930 const TypeOopPtr* recvx_type = tklass->as_instance_type();
2931 assert(recvx_type->klass_is_exact(), "");
2932
2933 if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
2934 // Subsume downstream occurrences of receiver with a cast to
2935       // recvx_type, since now we know what the type will be.
2936 Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
2937 (*casted_receiver) = _gvn.transform(cast);
2938 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
2939 // (User must make the replace_in_map call.)
2940 }
2941 }
2942
2943 return fail;
2944 }
2945
2946 //------------------------------subtype_check_receiver-------------------------
2947 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
2948 Node** casted_receiver) {
2949 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
2950 Node* want_klass = makecon(tklass);
2951
2952 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
2953
2954 // Ignore interface type information until interface types are properly tracked.
2955 if (!stopped() && !klass->is_interface()) {
2956 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
2957 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
2958 if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
2959 Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
2960 (*casted_receiver) = _gvn.transform(cast);
2961 }
2962 }
2963
2964 return slow_ctl;
2965 }
2966
2967 //------------------------------seems_never_null-------------------------------
2968 // Use null_seen information if it is available from the profile.
2969 // If we see an unexpected null at a type check we record it and force a
2970 // recompile; the offending check will be recompiled to handle nulls.
2971 // If we see several offending BCIs, then all checks in the
2972 // method will be recompiled.
2973 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
2974 speculating = !_gvn.type(obj)->speculative_maybe_null();
2975 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
2976 if (UncommonNullCast // Cutout for this technique
2977 && obj != null() // And not the -Xcomp stupid case?
2978 && !too_many_traps(reason)
2979 ) {
2980 if (speculating) {
3049
3050 //------------------------maybe_cast_profiled_receiver-------------------------
3051 // If the profile has seen exactly one type, narrow to exactly that type.
3052 // Subsequent type checks will always fold up.
3053 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3054 const TypeKlassPtr* require_klass,
3055 ciKlass* spec_klass,
3056 bool safe_for_replace) {
3057 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3058
3059 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3060
3061 // Make sure we haven't already deoptimized from this tactic.
3062 if (too_many_traps_or_recompiles(reason))
3063 return nullptr;
3064
3065 // (No, this isn't a call, but it's enough like a virtual call
3066 // to use the same ciMethod accessor to get the profile info...)
3067 // If we have a speculative type use it instead of profiling (which
3068 // may not help us)
3069 ciKlass* exact_kls = spec_klass == nullptr ? profile_has_unique_klass() : spec_klass;
3070 if (exact_kls != nullptr) {// no cast failures here
3071 if (require_klass == nullptr ||
3072 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3073 // If we narrow the type to match what the type profile sees or
3074 // the speculative type, we can then remove the rest of the
3075 // cast.
3076 // This is a win, even if the exact_kls is very specific,
3077 // because downstream operations, such as method calls,
3078 // will often benefit from the sharper type.
3079 Node* exact_obj = not_null_obj; // will get updated in place...
3080 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3081 &exact_obj);
3082 { PreserveJVMState pjvms(this);
3083 set_control(slow_ctl);
3084 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3085 }
3086 if (safe_for_replace) {
3087 replace_in_map(not_null_obj, exact_obj);
3088 }
3089 return exact_obj;
3179 // If not_null_obj is dead, only null-path is taken
3180 if (stopped()) { // Doing instance-of on a null?
3181 set_control(null_ctl);
3182 return intcon(0);
3183 }
3184 region->init_req(_null_path, null_ctl);
3185 phi ->init_req(_null_path, intcon(0)); // Set null path value
3186 if (null_ctl == top()) {
3187 // Do this eagerly, so that pattern matches like is_diamond_phi
3188 // will work even during parsing.
3189 assert(_null_path == PATH_LIMIT-1, "delete last");
3190 region->del_req(_null_path);
3191 phi ->del_req(_null_path);
3192 }
3193
3194   // Do we know the type check always succeeds?
3195 bool known_statically = false;
3196 if (_gvn.type(superklass)->singleton()) {
3197 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3198 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3199 if (subk->is_loaded()) {
3200 int static_res = C->static_subtype_check(superk, subk);
3201 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3202 }
3203 }
3204
3205 if (!known_statically) {
3206 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3207 // We may not have profiling here or it may not help us. If we
3208 // have a speculative type use it to perform an exact cast.
3209 ciKlass* spec_obj_type = obj_type->speculative_type();
3210 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3211 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3212 if (stopped()) { // Profile disagrees with this path.
3213 set_control(null_ctl); // Null is the only remaining possibility.
3214 return intcon(0);
3215 }
3216 if (cast_obj != nullptr) {
3217 not_null_obj = cast_obj;
3218 }
3219 }
3235 record_for_igvn(region);
3236
3237 // If we know the type check always succeeds then we don't use the
3238 // profiling data at this bytecode. Don't lose it, feed it to the
3239 // type system as a speculative type.
3240 if (safe_for_replace) {
3241 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3242 replace_in_map(obj, casted_obj);
3243 }
3244
3245 return _gvn.transform(phi);
3246 }
3247
3248 //-------------------------------gen_checkcast---------------------------------
3249 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3250 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3251 // uncommon-trap paths work. Adjust stack after this call.
3252 // If failure_control is supplied and not null, it is filled in with
3253 // the control edge for the cast failure. Otherwise, an appropriate
3254 // uncommon trap or exception is thrown.
3255 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
3256 Node* *failure_control) {
3257 kill_dead_locals(); // Benefit all the uncommon traps
3258 const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr()->try_improve();
3259 const TypeOopPtr *toop = tk->cast_to_exactness(false)->as_instance_type();
3260
3261 // Fast cutout: Check the case that the cast is vacuously true.
3262 // This detects the common cases where the test will short-circuit
3263 // away completely. We do this before we perform the null check,
3264 // because if the test is going to turn into zero code, we don't
3265 // want a residual null check left around. (Causes a slowdown,
3266 // for example, in some objArray manipulations, such as a[i]=a[j].)
3267 if (tk->singleton()) {
3268 const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3269 if (objtp != nullptr) {
3270 switch (C->static_subtype_check(tk, objtp->as_klass_type())) {
3271 case Compile::SSC_always_true:
3272         // If we know the type check always succeeds then we don't use
3273 // the profiling data at this bytecode. Don't lose it, feed it
3274 // to the type system as a speculative type.
3275 return record_profiled_receiver_for_speculation(obj);
3276 case Compile::SSC_always_false:
3277 // It needs a null check because a null will *pass* the cast check.
3278 // A non-null value will always produce an exception.
3279 if (!objtp->maybe_null()) {
3280 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3281 Deoptimization::DeoptReason reason = is_aastore ?
3282 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3283 builtin_throw(reason);
3284 return top();
3285 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3286 return null_assert(obj);
3287 }
3288 break; // Fall through to full check
3289 default:
3290 break;
3291 }
3292 }
3293 }
3294
3295 ciProfileData* data = nullptr;
3296 bool safe_for_replace = false;
3297 if (failure_control == nullptr) { // use MDO in regular case only
3298 assert(java_bc() == Bytecodes::_aastore ||
3299 java_bc() == Bytecodes::_checkcast,
3300 "interpreter profiles type checks only for these BCs");
3301 data = method()->method_data()->bci_to_data(bci());
3302 safe_for_replace = true;
3303 }
3304
3305 // Make the merge point
3306 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3307 RegionNode* region = new RegionNode(PATH_LIMIT);
3308 Node* phi = new PhiNode(region, toop);
3309 C->set_has_split_ifs(true); // Has chance for split-if optimization
3310
3311 // Use null-cast information if it is available
3312 bool speculative_not_null = false;
3313 bool never_see_null = ((failure_control == nullptr) // regular case only
3314 && seems_never_null(obj, data, speculative_not_null));
3315
3316 // Null check; get casted pointer; set region slot 3
3317 Node* null_ctl = top();
3318 Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3319
3320 // If not_null_obj is dead, only null-path is taken
3321 if (stopped()) { // Doing instance-of on a null?
3322 set_control(null_ctl);
3323 return null();
3324 }
3325 region->init_req(_null_path, null_ctl);
3326 phi ->init_req(_null_path, null()); // Set null path value
3327 if (null_ctl == top()) {
3328 // Do this eagerly, so that pattern matches like is_diamond_phi
3329 // will work even during parsing.
3330 assert(_null_path == PATH_LIMIT-1, "delete last");
3331 region->del_req(_null_path);
3332 phi ->del_req(_null_path);
3333 }
3334
3335 Node* cast_obj = nullptr;
3336 if (tk->klass_is_exact()) {
3337 // The following optimization tries to statically cast the speculative type of the object
3338 // (for example obtained during profiling) to the type of the superklass and then do a
3339 // dynamic check that the type of the object is what we expect. To work correctly
3340 // for checkcast and aastore the type of superklass should be exact.
3341 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3342 // We may not have profiling here or it may not help us. If we have
3343 // a speculative type use it to perform an exact cast.
3344 ciKlass* spec_obj_type = obj_type->speculative_type();
3345 if (spec_obj_type != nullptr || data != nullptr) {
3346 cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk, spec_obj_type, safe_for_replace);
3347 if (cast_obj != nullptr) {
3348 if (failure_control != nullptr) // failure is now impossible
3349 (*failure_control) = top();
3350 // adjust the type of the phi to the exact klass:
3351 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3352 }
3353 }
3354 }
3355
3356 if (cast_obj == nullptr) {
3357 // Generate the subtype check
3358 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass );
3359
3360 // Plug in success path into the merge
3361 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3362 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3363 if (failure_control == nullptr) {
3364 if (not_subtype_ctrl != top()) { // If failure is possible
3365 PreserveJVMState pjvms(this);
3366 set_control(not_subtype_ctrl);
3367 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3368 Deoptimization::DeoptReason reason = is_aastore ?
3369 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3370 builtin_throw(reason);
3371 }
3372 } else {
3373 (*failure_control) = not_subtype_ctrl;
3374 }
3375 }
3376
3377 region->init_req(_obj_path, control());
3378 phi ->init_req(_obj_path, cast_obj);
3379
3380 // A merge of null or Casted-NotNull obj
3381 Node* res = _gvn.transform(phi);
3382
3383 // Note I do NOT always 'replace_in_map(obj,result)' here.
3384 // if( tk->klass()->can_be_primary_super() )
3385 // This means that if I successfully store an Object into an array-of-String
3386 // I 'forget' that the Object is really now known to be a String. I have to
3387 // do this because we don't have true union types for interfaces - if I store
3388 // a Baz into an array-of-Interface and then tell the optimizer it's an
3389 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3390 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3391 // replace_in_map( obj, res );
3392
3393 // Return final merged results
3394 set_control( _gvn.transform(region) );
3395 record_for_igvn(region);
3396
3397 return record_profiled_receiver_for_speculation(res);
3398 }
3399
3400 //------------------------------next_monitor-----------------------------------
3401 // What number should be given to the next monitor?
3402 int GraphKit::next_monitor() {
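  // (Illustrative: each monitor occupies sync_stack_slots() consecutive slots in
  //  the frame's fixed-slot area, so with, say, two slots per monitor and one
  //  monitor already held, the new monitor starts at slot 2 and the high-water
  //  mark below becomes 4.)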
3403 int current = jvms()->monitor_depth()* C->sync_stack_slots();
3404 int next = current + C->sync_stack_slots();
3405 // Keep the toplevel high water mark current:
3406 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3407 return current;
3408 }
3409
3410 //------------------------------insert_mem_bar---------------------------------
3411 // Memory barrier to avoid floating things around
3412 // The membar serves as a pinch point between both control and all memory slices.
3413 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3414 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3415 mb->init_req(TypeFunc::Control, control());
3416 mb->init_req(TypeFunc::Memory, reset_memory());
3417 Node* membar = _gvn.transform(mb);
3445 }
3446 Node* membar = _gvn.transform(mb);
3447 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3448 if (alias_idx == Compile::AliasIdxBot) {
3449 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3450 } else {
3451 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3452 }
3453 return membar;
3454 }
3455
3456 //------------------------------shared_lock------------------------------------
3457 // Emit locking code.
3458 FastLockNode* GraphKit::shared_lock(Node* obj) {
3459 // bci is either a monitorenter bc or InvocationEntryBci
3460 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3461 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3462
3463 if( !GenerateSynchronizationCode )
3464 return nullptr; // Not locking things?
3465 if (stopped()) // Dead monitor?
3466 return nullptr;
3467
3468 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3469
3470 // Box the stack location
3471 Node* box = new BoxLockNode(next_monitor());
3472 // Check for bailout after new BoxLockNode
3473 if (failing()) { return nullptr; }
3474 box = _gvn.transform(box);
3475 Node* mem = reset_memory();
3476
3477 FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3478
3479 // Create the rtm counters for this fast lock if needed.
3480 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3481
3482 // Add monitor to debug info for the slow path. If we block inside the
3483 // slow path and de-opt, we need the monitor hanging around
3484 map()->push_monitor( flock );
3516 }
3517 #endif
3518
3519 return flock;
3520 }
3521
3522
3523 //------------------------------shared_unlock----------------------------------
3524 // Emit unlocking code.
3525 void GraphKit::shared_unlock(Node* box, Node* obj) {
3526   // bci is either a monitorexit bc or InvocationEntryBci
3527 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3528 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3529
3530 if( !GenerateSynchronizationCode )
3531 return;
3532 if (stopped()) { // Dead monitor?
3533 map()->pop_monitor(); // Kill monitor from debug info
3534 return;
3535 }
3536
3537 // Memory barrier to avoid floating things down past the locked region
3538 insert_mem_bar(Op_MemBarReleaseLock);
3539
3540 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3541 UnlockNode *unlock = new UnlockNode(C, tf);
3542 #ifdef ASSERT
3543 unlock->set_dbg_jvms(sync_jvms());
3544 #endif
3545 uint raw_idx = Compile::AliasIdxRaw;
3546 unlock->init_req( TypeFunc::Control, control() );
3547 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3548 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3549 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3550 unlock->init_req( TypeFunc::ReturnAdr, top() );
3551
3552 unlock->init_req(TypeFunc::Parms + 0, obj);
3553 unlock->init_req(TypeFunc::Parms + 1, box);
3554 unlock = _gvn.transform(unlock)->as_Unlock();
3555
3556 Node* mem = reset_memory();
3557
3558 // unlock has no side-effects, sets few values
3559 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3560
3561 // Kill monitor from debug info
3562 map()->pop_monitor( );
3563 }
3564
3565 //-------------------------------get_layout_helper-----------------------------
3566 // If the given klass is a constant or known to be an array,
3567 // fetch the constant layout helper value into constant_value
3568 // and return null. Otherwise, load the non-constant
3569 // layout helper value, and return the node which represents it.
3570 // This two-faced routine is useful because allocation sites
3571 // almost always feature constant types.
3572 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3573 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3574 if (!StressReflectiveCode && klass_t != nullptr) {
3575 bool xklass = klass_t->klass_is_exact();
3576 if (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM)) {
3577 jint lhelper;
3578 if (klass_t->isa_aryklassptr()) {
3579 BasicType elem = klass_t->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
3580 if (is_reference_type(elem, true)) {
3581 elem = T_OBJECT;
3582 }
3583 lhelper = Klass::array_layout_helper(elem);
3584 } else {
3585 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
3586 }
3587 if (lhelper != Klass::_lh_neutral_value) {
3588 constant_value = lhelper;
3589 return (Node*) nullptr;
3590 }
3591 }
3592 }
3593 constant_value = Klass::_lh_neutral_value; // put in a known value
3594 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3595 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3596 }
3597
3598 // We just put in an allocate/initialize with a big raw-memory effect.
3599 // Hook selected additional alias categories on the initialization.
3600 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3601 MergeMemNode* init_in_merge,
3602 Node* init_out_raw) {
3603 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3604 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3605
3606 Node* prevmem = kit.memory(alias_idx);
3607 init_in_merge->set_memory_at(alias_idx, prevmem);
3608 kit.set_memory(init_out_raw, alias_idx);
3609 }
3610
3611 //---------------------------set_output_for_allocation-------------------------
3612 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3613 const TypeOopPtr* oop_type,
3614 bool deoptimize_on_exception) {
3615 int rawidx = Compile::AliasIdxRaw;
3616 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3617 add_safepoint_edges(alloc);
3618 Node* allocx = _gvn.transform(alloc);
3619 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3620 // create memory projection for i_o
3621 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3622 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3623
3624 // create a memory projection as for the normal control path
3625 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3626 set_memory(malloc, rawidx);
3627
3628   // a normal slow-call doesn't change i_o, but an allocation does;
3629 // we create a separate i_o projection for the normal control path
3630 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3631 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3632
3633 // put in an initialization barrier
3634 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3635 rawoop)->as_Initialize();
3636 assert(alloc->initialization() == init, "2-way macro link must work");
3637 assert(init ->allocation() == alloc, "2-way macro link must work");
3638 {
3639 // Extract memory strands which may participate in the new object's
3640 // initialization, and source them from the new InitializeNode.
3641 // This will allow us to observe initializations when they occur,
3642 // and link them properly (as a group) to the InitializeNode.
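    // (Routing each relevant alias category through the InitializeNode is what
    //  later allows field stores to be captured by it and folded into the
    //  object's initial zeroing during macro expansion.)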
3643 assert(init->in(InitializeNode::Memory) == malloc, "");
3644 MergeMemNode* minit_in = MergeMemNode::make(malloc);
3645 init->set_req(InitializeNode::Memory, minit_in);
3646 record_for_igvn(minit_in); // fold it up later, if possible
3647 Node* minit_out = memory(rawidx);
3648 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3649 // Add an edge in the MergeMem for the header fields so an access
3650 // to one of those has correct memory state
3651 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3652 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3653 if (oop_type->isa_aryptr()) {
3654 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3655 int elemidx = C->get_alias_index(telemref);
3656 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3657 } else if (oop_type->isa_instptr()) {
3658 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
3659 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3660 ciField* field = ik->nonstatic_field_at(i);
3661 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
3662 continue; // do not bother to track really large numbers of fields
3663 // Find (or create) the alias category for this field:
3664 int fieldidx = C->alias_type(field)->index();
3665 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3666 }
3667 }
3668 }
3669
3670 // Cast raw oop to the real thing...
3671 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3672 javaoop = _gvn.transform(javaoop);
3673 C->set_recent_alloc(control(), javaoop);
3674 assert(just_allocated_object(control()) == javaoop, "just allocated");
3675
3676 #ifdef ASSERT
3677 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
3688 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3689 }
3690 }
3691 #endif //ASSERT
3692
3693 return javaoop;
3694 }
3695
3696 //---------------------------new_instance--------------------------------------
3697 // This routine takes a klass_node which may be constant (for a static type)
3698 // or may be non-constant (for reflective code). It will work equally well
3699 // for either, and the graph will fold nicely if the optimizer later reduces
3700 // the type to a constant.
3701 // The optional arguments are for specialized use by intrinsics:
3702 //  - If 'extra_slow_test' is non-null, it is an extra condition for the slow-path.
3703 // - If 'return_size_val', report the total object size to the caller.
3704 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3705 Node* GraphKit::new_instance(Node* klass_node,
3706 Node* extra_slow_test,
3707 Node* *return_size_val,
3708 bool deoptimize_on_exception) {
3709 // Compute size in doublewords
3710 // The size is always an integral number of doublewords, represented
3711 // as a positive bytewise size stored in the klass's layout_helper.
3712 // The layout_helper also encodes (in a low bit) the need for a slow path.
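  // (For instance klasses the layout_helper is simply the aligned instance size
  //  in bytes, with the low _lh_instance_slow_path_bit set when instances cannot
  //  be allocated on the fast path, e.g. classes with finalizers.)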
3713 jint layout_con = Klass::_lh_neutral_value;
3714 Node* layout_val = get_layout_helper(klass_node, layout_con);
3715 int layout_is_con = (layout_val == nullptr);
3716
3717 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
3718 // Generate the initial go-slow test. It's either ALWAYS (return a
3719 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
3720 // case) a computed value derived from the layout_helper.
3721 Node* initial_slow_test = nullptr;
3722 if (layout_is_con) {
3723 assert(!StressReflectiveCode, "stress mode does not use these paths");
3724 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3725 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3726 } else { // reflective case
3727 // This reflective path is used by Unsafe.allocateInstance.
3728 // (It may be stress-tested by specifying StressReflectiveCode.)
3729     //   Basically, we want to get into the VM if there's an illegal argument.
3730 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3731 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3732 if (extra_slow_test != intcon(0)) {
3733 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3734 }
3735 // (Macro-expander will further convert this to a Bool, if necessary.)
3746
3747 // Clear the low bits to extract layout_helper_size_in_bytes:
3748 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3749 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3750 size = _gvn.transform( new AndXNode(size, mask) );
3751 }
3752 if (return_size_val != nullptr) {
3753 (*return_size_val) = size;
3754 }
3755
3756 // This is a precise notnull oop of the klass.
3757 // (Actually, it need not be precise if this is a reflective allocation.)
3758 // It's what we cast the result to.
3759 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3760 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
3761 const TypeOopPtr* oop_type = tklass->as_instance_type();
3762
3763 // Now generate allocation code
3764
3765 // The entire memory state is needed for slow path of the allocation
3766   // since GC and deoptimization can happen.
3767 Node *mem = reset_memory();
3768 set_all_memory(mem); // Create new memory state
3769
3770 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3771 control(), mem, i_o(),
3772 size, klass_node,
3773 initial_slow_test);
3774
3775 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3776 }
3777
3778 //-------------------------------new_array-------------------------------------
3779 // helper for both newarray and anewarray
3780 // The 'length' parameter is (obviously) the length of the array.
3781 // The optional arguments are for specialized use by intrinsics:
3782 // - If 'return_size_val', report the non-padded array size (sum of header size
3783 // and array body) to the caller.
3784 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3785 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3786 Node* length, // number of array elements
3787 int nargs, // number of arguments to push back for uncommon trap
3788 Node* *return_size_val,
3789 bool deoptimize_on_exception) {
3790 jint layout_con = Klass::_lh_neutral_value;
3791 Node* layout_val = get_layout_helper(klass_node, layout_con);
3792 int layout_is_con = (layout_val == nullptr);
3793
3794 if (!layout_is_con && !StressReflectiveCode &&
3795 !too_many_traps(Deoptimization::Reason_class_check)) {
3796 // This is a reflective array creation site.
3797 // Optimistically assume that it is a subtype of Object[],
3798 // so that we can fold up all the address arithmetic.
3799 layout_con = Klass::array_layout_helper(T_OBJECT);
3800 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3801 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3802 { BuildCutout unless(this, bol_lh, PROB_MAX);
3803 inc_sp(nargs);
3804 uncommon_trap(Deoptimization::Reason_class_check,
3805 Deoptimization::Action_maybe_recompile);
3806 }
3807 layout_val = nullptr;
3808 layout_is_con = true;
3809 }
3810
3811 // Generate the initial go-slow test. Make sure we do not overflow
3812 // if length is huge (near 2Gig) or negative! We do not need
3813 // exact double-words here, just a close approximation of needed
3814 // double-words. We can't add any offset or rounding bits, lest we
3815 // take a size -1 of bytes and make it positive. Use an unsigned
3816 // compare, so negative sizes look hugely positive.
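  // (E.g. an illegal length of -1 is seen by the unsigned compare as 0xFFFFFFFF,
  //  far above any fast_size_limit, and is therefore routed to the slow path.)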
3817 int fast_size_limit = FastAllocateSizeLimit;
3818 if (layout_is_con) {
3819 assert(!StressReflectiveCode, "stress mode does not use these paths");
3820 // Increase the size limit if we have exact knowledge of array type.
3821 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3822 fast_size_limit <<= (LogBytesPerLong - log2_esize);
3823 }
3824
3825 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3826 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3827
3828 // --- Size Computation ---
3829 // array_size = round_to_heap(array_header + (length << elem_shift));
3830 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
3831 // and align_to(x, y) == ((x + y-1) & ~(y-1))
3832 // The rounding mask is strength-reduced, if possible.
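  // (Worked example, assuming an illustrative 16-byte array header: an int[] of
  //  length 10 needs 16 + (10 << 2) = 56 bytes, already a multiple of an 8-byte
  //  MinObjAlignmentInBytes, so the rounding step adds nothing.)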
3833 int round_mask = MinObjAlignmentInBytes - 1;
3834 Node* header_size = nullptr;
3835 // (T_BYTE has the weakest alignment and size restrictions...)
3836 if (layout_is_con) {
3837 int hsize = Klass::layout_helper_header_size(layout_con);
3838 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3839 if ((round_mask & ~right_n_bits(eshift)) == 0)
3840 round_mask = 0; // strength-reduce it if it goes away completely
3841 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3842 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3843 assert(header_size_min <= hsize, "generic minimum is smallest");
3844 header_size = intcon(hsize);
3845 } else {
3846 Node* hss = intcon(Klass::_lh_header_size_shift);
3847 Node* hsm = intcon(Klass::_lh_header_size_mask);
3848 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
3849 header_size = _gvn.transform(new AndINode(header_size, hsm));
3850 }
3851
3852 Node* elem_shift = nullptr;
3853 if (layout_is_con) {
3854 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3855 if (eshift != 0)
3856 elem_shift = intcon(eshift);
3857 } else {
3858 // There is no need to mask or shift this value.
3859 // The semantics of LShiftINode include an implicit mask to 0x1F.
3860 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3861 elem_shift = layout_val;
3908 }
3909 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
3910
3911 if (return_size_val != nullptr) {
3912 // This is the size
3913 (*return_size_val) = non_rounded_size;
3914 }
3915
3916 Node* size = non_rounded_size;
3917 if (round_mask != 0) {
3918 Node* mask1 = MakeConX(round_mask);
3919 size = _gvn.transform(new AddXNode(size, mask1));
3920 Node* mask2 = MakeConX(~round_mask);
3921 size = _gvn.transform(new AndXNode(size, mask2));
3922 }
3923 // else if round_mask == 0, the size computation is self-rounding
3924
3925 // Now generate allocation code
3926
3927 // The entire memory state is needed for slow path of the allocation
3928   // since GC and deoptimization can happen.
3929 Node *mem = reset_memory();
3930 set_all_memory(mem); // Create new memory state
3931
3932 if (initial_slow_test->is_Bool()) {
3933 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3934 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3935 }
3936
3937 const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3938 Node* valid_length_test = _gvn.intcon(1);
3939 if (ary_type->isa_aryptr()) {
3940 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
3941 jint max = TypeAryPtr::max_array_length(bt);
3942 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
3943 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
3944 }
3945
3946 // Create the AllocateArrayNode and its result projections
3947 AllocateArrayNode* alloc
3948 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3949 control(), mem, i_o(),
3950 size, klass_node,
3951 initial_slow_test,
3952 length, valid_length_test);
3953
3954 // Cast to correct type. Note that the klass_node may be constant or not,
3955 // and in the latter case the actual array type will be inexact also.
3956 // (This happens via a non-constant argument to inline_native_newArray.)
3957 // In any case, the value of klass_node provides the desired array type.
3958 const TypeInt* length_type = _gvn.find_int_type(length);
3959 if (ary_type->isa_aryptr() && length_type != nullptr) {
3960 // Try to get a better type than POS for the size
3961 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3962 }
3963
3964 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
3965
3966 array_ideal_length(alloc, ary_type, true);
3967 return javaoop;
3968 }
3969
3970 // The following "Ideal_foo" functions are placed here because they recognize
3971 // the graph shapes created by the functions immediately above.
3972
3973 //---------------------------Ideal_allocation----------------------------------
4080 set_all_memory(ideal.merged_memory());
4081 set_i_o(ideal.i_o());
4082 set_control(ideal.ctrl());
4083 }
4084
4085 void GraphKit::final_sync(IdealKit& ideal) {
4086 // Final sync IdealKit and graphKit.
4087 sync_kit(ideal);
4088 }
4089
4090 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4091 Node* len = load_array_length(load_String_value(str, set_ctrl));
4092 Node* coder = load_String_coder(str, set_ctrl);
4093 // Divide length by 2 if coder is UTF16
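  // (coder is 0 for LATIN1 and 1 for UTF16, so "len >> coder" halves the backing
  //  byte[] length exactly when the String is UTF16-encoded.)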
4094 return _gvn.transform(new RShiftINode(len, coder));
4095 }
4096
4097 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4098 int value_offset = java_lang_String::value_offset();
4099 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4100 false, nullptr, 0);
4101 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4102 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4103 TypeAry::make(TypeInt::BYTE, TypeInt::POS),
4104 ciTypeArrayKlass::make(T_BYTE), true, 0);
4105 Node* p = basic_plus_adr(str, str, value_offset);
4106 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4107 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4108 return load;
4109 }
4110
4111 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4112 if (!CompactStrings) {
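    // Compact strings disabled: every String is UTF16-encoded, so the coder is a constant.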
4113 return intcon(java_lang_String::CODER_UTF16);
4114 }
4115 int coder_offset = java_lang_String::coder_offset();
4116 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4117 false, nullptr, 0);
4118 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4119
4120 Node* p = basic_plus_adr(str, str, coder_offset);
4121 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4122 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4123 return load;
4124 }
4125
4126 void GraphKit::store_String_value(Node* str, Node* value) {
4127 int value_offset = java_lang_String::value_offset();
4128 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4129 false, nullptr, 0);
4130 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4131
4132 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4133 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4134 }
4135
4136 void GraphKit::store_String_coder(Node* str, Node* value) {
4137 int coder_offset = java_lang_String::coder_offset();
4138 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4139 false, nullptr, 0);
4140 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4141
4142 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4143 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4144 }
4145
4146 // Capture src and dst memory state with a MergeMemNode
4147 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4148 if (src_type == dst_type) {
4149 // Types are equal, we don't need a MergeMemNode
4150 return memory(src_type);
4151 }
4152 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4153 record_for_igvn(merge); // fold it up later, if possible
4154 int src_idx = C->get_alias_index(src_type);
4155 int dst_idx = C->get_alias_index(dst_type);
4156 merge->set_memory_at(src_idx, memory(src_idx));
4157 merge->set_memory_at(dst_idx, memory(dst_idx));
4158 return merge;
4159 }
4232 i_char->init_req(2, AddI(i_char, intcon(2)));
4233
4234 set_control(IfFalse(iff));
4235 set_memory(st, TypeAryPtr::BYTES);
4236 }
4237
4238 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4239 if (!field->is_constant()) {
4240 return nullptr; // Field not marked as constant.
4241 }
4242 ciInstance* holder = nullptr;
4243 if (!field->is_static()) {
4244 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4245 if (const_oop != nullptr && const_oop->is_instance()) {
4246 holder = const_oop->as_instance();
4247 }
4248 }
4249 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4250 /*is_unsigned_load=*/false);
4251 if (con_type != nullptr) {
4252 return makecon(con_type);
4253 }
4254 return nullptr;
4255 }
25 #include "precompiled.hpp"
26 #include "ci/ciFlatArrayKlass.hpp"
27 #include "ci/ciInlineKlass.hpp"
28 #include "ci/ciUtilities.hpp"
29 #include "classfile/javaClasses.hpp"
30 #include "ci/ciObjArray.hpp"
31 #include "asm/register.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/c2/barrierSetC2.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "opto/addnode.hpp"
38 #include "opto/castnode.hpp"
39 #include "opto/convertnode.hpp"
40 #include "opto/graphKit.hpp"
41 #include "opto/idealKit.hpp"
42 #include "opto/inlinetypenode.hpp"
43 #include "opto/intrinsicnode.hpp"
44 #include "opto/locknode.hpp"
45 #include "opto/machnode.hpp"
46 #include "opto/narrowptrnode.hpp"
47 #include "opto/opaquenode.hpp"
48 #include "opto/parse.hpp"
49 #include "opto/rootnode.hpp"
50 #include "opto/runtime.hpp"
51 #include "opto/subtypenode.hpp"
52 #include "runtime/deoptimization.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "utilities/bitMap.inline.hpp"
55 #include "utilities/powerOfTwo.hpp"
56 #include "utilities/growableArray.hpp"
57
58 //----------------------------GraphKit-----------------------------------------
59 // Main utility constructor.
60 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
61 : Phase(Phase::Parser),
62 _env(C->env()),
63 _gvn((gvn != nullptr) ? *gvn : *C->initial_gvn()),
64 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
65 {
66 assert(gvn == nullptr || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
67 _exceptions = jvms->map()->next_exception();
68 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
69 set_jvms(jvms);
70 #ifdef ASSERT
71 if (_gvn.is_IterGVN() != nullptr) {
72 assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
73 // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
74 _worklist_size = _gvn.C->igvn_worklist()->size();
75 }
76 #endif
77 }
78
79 // Private constructor for parser.
80 GraphKit::GraphKit()
81 : Phase(Phase::Parser),
82 _env(C->env()),
83 _gvn(*C->initial_gvn()),
84 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
85 {
86 _exceptions = nullptr;
87 set_map(nullptr);
88 debug_only(_sp = -99);
89 debug_only(set_bci(-99));
90 }
91
92
93
94 //---------------------------clean_stack---------------------------------------
95 // Clear away rubbish from the stack area of the JVM state.
96 // This destroys any arguments that may be waiting on the stack.
852 if (PrintMiscellaneous && (Verbose || WizardMode)) {
853 tty->print_cr("Zombie local %d: ", local);
854 jvms->dump();
855 }
856 return false;
857 }
858 }
859 }
860 return true;
861 }
862
863 #endif //ASSERT
864
865 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
866 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
867 ciMethod* cur_method = jvms->method();
868 int cur_bci = jvms->bci();
869 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
870 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
871 return Interpreter::bytecode_should_reexecute(code) ||
872 (is_anewarray && (code == Bytecodes::_multianewarray));
873 // Reexecute _multianewarray bytecode which was replaced with
874 // sequence of [a]newarray. See Parse::do_multianewarray().
875 //
876 // Note: interpreter should not have it set since this optimization
877 // is limited by dimensions and guarded by flag so in some cases
878 // multianewarray() runtime calls will be generated and
879 //       the bytecode should not be reexecuted (stack will not be reset).
880 } else {
881 return false;
882 }
883 }
884
885 // Helper function for adding JVMState and debug information to node
886 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
887 // Add the safepoint edges to the call (or other safepoint).
888
889 // Make sure dead locals are set to top. This
890 // should help register allocation time and cut down on the size
891 // of the deoptimization information.
892 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
943 }
944
945 // Presize the call:
946 DEBUG_ONLY(uint non_debug_edges = call->req());
947 call->add_req_batch(top(), youngest_jvms->debug_depth());
948 assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
949
950 // Set up edges so that the call looks like this:
951 // Call [state:] ctl io mem fptr retadr
952 // [parms:] parm0 ... parmN
953 // [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
954 // [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
955 // [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
956 // Note that caller debug info precedes callee debug info.
957
958 // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
959 uint debug_ptr = call->req();
960
961 // Loop over the map input edges associated with jvms, add them
962 // to the call node, & reset all offsets to match call node array.
963
964 JVMState* callee_jvms = nullptr;
965 for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
966 uint debug_end = debug_ptr;
967 uint debug_start = debug_ptr - in_jvms->debug_size();
968 debug_ptr = debug_start; // back up the ptr
969
970 uint p = debug_start; // walks forward in [debug_start, debug_end)
971 uint j, k, l;
972 SafePointNode* in_map = in_jvms->map();
973 out_jvms->set_map(call);
974
975 if (can_prune_locals) {
976 assert(in_jvms->method() == out_jvms->method(), "sanity");
977 // If the current throw can reach an exception handler in this JVMS,
978 // then we must keep everything live that can reach that handler.
979 // As a quick and dirty approximation, we look for any handlers at all.
980 if (in_jvms->method()->has_exception_handlers()) {
981 can_prune_locals = false;
982 }
983 }
984
985 // Add the Locals
986 k = in_jvms->locoff();
987 l = in_jvms->loc_size();
988 out_jvms->set_locoff(p);
989 if (!can_prune_locals) {
990 for (j = 0; j < l; j++) {
991 Node* val = in_map->in(k + j);
992 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
993 if (val->is_InlineType() && val->isa_InlineType()->is_larval() && callee_jvms != nullptr &&
994 callee_jvms->method()->is_object_constructor() && callee_jvms->method()->holder()->is_inlinetype() && val == in_map->argument(in_jvms, 0)) {
995 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
996 }
997 call->set_req(p++, val);
998 }
999 } else {
1000 p += l; // already set to top above by add_req_batch
1001 }
1002
1003 // Add the Expression Stack
1004 k = in_jvms->stkoff();
1005 l = in_jvms->sp();
1006 out_jvms->set_stkoff(p);
1007 if (!can_prune_locals) {
1008 for (j = 0; j < l; j++) {
1009 Node* val = in_map->in(k + j);
1010 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
1011 if (val->is_InlineType() && val->isa_InlineType()->is_larval() && callee_jvms != nullptr &&
1012 callee_jvms->method()->is_object_constructor() && callee_jvms->method()->holder()->is_inlinetype() && val == in_map->argument(in_jvms, 0)) {
1013 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
1014 }
1015 call->set_req(p++, val);
1016 }
1017 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
1018 // Divide stack into {S0,...,S1}, where S0 is set to top.
1019 uint s1 = stack_slots_not_pruned;
1020 stack_slots_not_pruned = 0; // for next iteration
1021 if (s1 > l) s1 = l;
1022 uint s0 = l - s1;
1023 p += s0; // skip the tops preinstalled by add_req_batch
1024 for (j = s0; j < l; j++)
1025 call->set_req(p++, in_map->in(k+j));
1026 } else {
1027 p += l; // already set to top above by add_req_batch
1028 }
1029
1030 // Add the Monitors
1031 k = in_jvms->monoff();
1032 l = in_jvms->mon_size();
1033 out_jvms->set_monoff(p);
1034 for (j = 0; j < l; j++)
1035 call->set_req(p++, in_map->in(k+j));
1036
1037 // Copy any scalar object fields.
1038 k = in_jvms->scloff();
1039 l = in_jvms->scl_size();
1040 out_jvms->set_scloff(p);
1041 for (j = 0; j < l; j++)
1042 call->set_req(p++, in_map->in(k+j));
1043
1044 // Finish the new jvms.
1045 out_jvms->set_endoff(p);
1046
1047 assert(out_jvms->endoff() == debug_end, "fill ptr must match");
1048 assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
1049 assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
1050 assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
1051 assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
1052 assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
1053
1054 // Update the two tail pointers in parallel.
1055 callee_jvms = out_jvms;
1056 out_jvms = out_jvms->caller();
1057 in_jvms = in_jvms->caller();
1058 }
1059
1060 assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1061
1062 // Test the correctness of JVMState::debug_xxx accessors:
1063 assert(call->jvms()->debug_start() == non_debug_edges, "");
1064 assert(call->jvms()->debug_end() == call->req(), "");
1065 assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1066 }
1067
1068 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
1069 Bytecodes::Code code = java_bc();
1070 if (code == Bytecodes::_wide) {
1071 code = method()->java_code_at_bci(bci() + 1);
1072 }
1073
1074 if (code != Bytecodes::_illegal) {
1075 depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1211 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1212 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1213 return _gvn.transform( new AndLNode(conv, mask) );
1214 }
1215
1216 Node* GraphKit::ConvL2I(Node* offset) {
1217 // short-circuit a common case
1218 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1219 if (offset_con != (jlong)Type::OffsetBot) {
1220 return intcon((int) offset_con);
1221 }
1222 return _gvn.transform( new ConvL2INode(offset));
1223 }
1224
1225 //-------------------------load_object_klass-----------------------------------
1226 Node* GraphKit::load_object_klass(Node* obj) {
1227 // Special-case a fresh allocation to avoid building nodes:
1228 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1229 if (akls != nullptr) return akls;
1230 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1231 return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
1232 }
1233
1234 //-------------------------load_array_length-----------------------------------
1235 Node* GraphKit::load_array_length(Node* array) {
1236 // Special-case a fresh allocation to avoid building nodes:
1237 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1238 Node *alen;
1239 if (alloc == nullptr) {
1240 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1241 alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1242 } else {
1243 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1244 }
1245 return alen;
1246 }
1247
1248 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1249 const TypeOopPtr* oop_type,
1250 bool replace_length_in_map) {
1251 Node* length = alloc->Ideal_length();
1260 replace_in_map(length, ccast);
1261 }
1262 return ccast;
1263 }
1264 }
1265 return length;
1266 }
1267
1268 //------------------------------do_null_check----------------------------------
1269 // Helper function to do a null pointer check. Returned value is
1270 // the incoming address with null casted away. You are allowed to use the
1271 // not-null value only if you are control dependent on the test.
1272 #ifndef PRODUCT
1273 extern uint explicit_null_checks_inserted,
1274 explicit_null_checks_elided;
1275 #endif
1276 Node* GraphKit::null_check_common(Node* value, BasicType type,
1277 // optional arguments for variations:
1278 bool assert_null,
1279 Node* *null_control,
1280 bool speculative,
1281 bool is_init_check) {
1282 assert(!assert_null || null_control == nullptr, "not both at once");
1283 if (stopped()) return top();
1284 NOT_PRODUCT(explicit_null_checks_inserted++);
1285
1286 if (value->is_InlineType()) {
1287 // Null checking a scalarized but nullable inline type. Check the IsInit
1288 // input instead of the oop input to avoid keeping buffer allocations alive.
1289 InlineTypeNode* vtptr = value->as_InlineType();
1290 while (vtptr->get_oop()->is_InlineType()) {
1291 vtptr = vtptr->get_oop()->as_InlineType();
1292 }
1293 null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1294 if (stopped()) {
1295 return top();
1296 }
1297 if (assert_null) {
1298 // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1299 // vtptr = InlineTypeNode::make_null(_gvn, vtptr->type()->inline_klass());
1300 // replace_in_map(value, vtptr);
1301 // return vtptr;
1302 return null();
1303 }
1304 bool do_replace_in_map = (null_control == nullptr || (*null_control) == top());
1305 return cast_not_null(value, do_replace_in_map);
1306 }
1307
1308 // Construct null check
1309 Node *chk = nullptr;
1310 switch(type) {
1311 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1312 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1313 case T_ARRAY : // fall through
1314 type = T_OBJECT; // simplify further tests
1315 case T_OBJECT : {
1316 const Type *t = _gvn.type( value );
1317
1318 const TypeOopPtr* tp = t->isa_oopptr();
1319 if (tp != nullptr && !tp->is_loaded()
1320 // Only for do_null_check, not any of its siblings:
1321 && !assert_null && null_control == nullptr) {
1322 // Usually, any field access or invocation on an unloaded oop type
1323 // will simply fail to link, since the statically linked class is
1324 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1325 // the static class is loaded but the sharper oop type is not.
1326 // Rather than checking for this obscure case in lots of places,
1327 // we simply observe that a null check on an unloaded class
1391 }
1392 Node *oldcontrol = control();
1393 set_control(cfg);
1394 Node *res = cast_not_null(value);
1395 set_control(oldcontrol);
1396 NOT_PRODUCT(explicit_null_checks_elided++);
1397 return res;
1398 }
1399 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1400 if (cfg == nullptr) break; // Quit at region nodes
1401 depth++;
1402 }
1403 }
1404
1405 //-----------
1406 // Branch to failure if null
1407 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1408 Deoptimization::DeoptReason reason;
1409 if (assert_null) {
1410 reason = Deoptimization::reason_null_assert(speculative);
1411 } else if (type == T_OBJECT || is_init_check) {
1412 reason = Deoptimization::reason_null_check(speculative);
1413 } else {
1414 reason = Deoptimization::Reason_div0_check;
1415 }
1416 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1417 // ciMethodData::has_trap_at will return a conservative -1 if any
1418 // must-be-null assertion has failed. This could cause performance
1419 // problems for a method after its first do_null_assert failure.
1420 // Consider using 'Reason_class_check' instead?
1421
1422 // To cause an implicit null check, we set the not-null probability
1423 // to the maximum (PROB_MAX). For an explicit check the probability
1424 // is set to a smaller value.
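  // (With PROB_MAX the backend may later turn the explicit test into an implicit
  //  null check, letting the memory access itself fault if the value is null.)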
1425 if (null_control != nullptr || too_many_traps(reason)) {
1426 // probability is less likely
1427 ok_prob = PROB_LIKELY_MAG(3);
1428 } else if (!assert_null &&
1429 (ImplicitNullCheckThreshold > 0) &&
1430 method() != nullptr &&
1431 (method()->method_data()->trap_count(reason)
1465 }
1466
1467 if (assert_null) {
1468 // Cast obj to null on this path.
1469 replace_in_map(value, zerocon(type));
1470 return zerocon(type);
1471 }
1472
1473 // Cast obj to not-null on this path, if there is no null_control.
1474 // (If there is a null_control, a non-null value may come back to haunt us.)
1475 if (type == T_OBJECT) {
1476 Node* cast = cast_not_null(value, false);
1477 if (null_control == nullptr || (*null_control) == top())
1478 replace_in_map(value, cast);
1479 value = cast;
1480 }
1481
1482 return value;
1483 }
1484
1485 //------------------------------cast_not_null----------------------------------
1486 // Cast obj to not-null on this path
1487 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1488 if (obj->is_InlineType()) {
1489 // TODO 8325106 Can we avoid cloning?
1490 Node* vt = obj->clone();
1491 vt->as_InlineType()->set_is_init(_gvn);
1492 vt = _gvn.transform(vt);
1493 if (do_replace_in_map) {
1494 replace_in_map(obj, vt);
1495 }
1496 return vt;
1497 }
1498 const Type *t = _gvn.type(obj);
1499 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1500 // Object is already not-null?
1501 if( t == t_not_null ) return obj;
1502
1503 Node* cast = new CastPPNode(control(), obj,t_not_null);
1504 cast = _gvn.transform( cast );
1505
1506 // Scan for instances of 'obj' in the current JVM mapping.
1507 // These instances are known to be not-null after the test.
1508 if (do_replace_in_map)
1509 replace_in_map(obj, cast);
1510
1511 return cast; // Return casted value
1512 }
1513
1514 // Sometimes in intrinsics, we implicitly know an object is not null
1515 // (there's no actual null check) so we can cast it to not null. In
1516 // the course of optimizations, the input to the cast can become null.
1517 // In that case that data path will die and we need the control path
1603 // These are layered on top of the factory methods in LoadNode and StoreNode,
1604 // and integrate with the parser's memory state and _gvn engine.
1605 //
1606
1607 // factory methods in "int adr_idx"
1608 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1609 int adr_idx,
1610 MemNode::MemOrd mo,
1611 LoadNode::ControlDependency control_dependency,
1612 bool require_atomic_access,
1613 bool unaligned,
1614 bool mismatched,
1615 bool unsafe,
1616 uint8_t barrier_data) {
1617 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1618 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1619 debug_only(adr_type = C->get_adr_type(adr_idx));
1620 Node* mem = memory(adr_idx);
1621 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1622 ld = _gvn.transform(ld);
1623
1624 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1625 // Improve graph before escape analysis and boxing elimination.
1626 record_for_igvn(ld);
1627 if (ld->is_DecodeN()) {
1628 // Also record the actual load (LoadN) in case ld is DecodeN
1629 assert(ld->in(1)->Opcode() == Op_LoadN, "Assumption invalid: input to DecodeN is not LoadN");
1630 record_for_igvn(ld->in(1));
1631 }
1632 }
1633 return ld;
1634 }
1635
1636 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1637 int adr_idx,
1638 MemNode::MemOrd mo,
1639 bool require_atomic_access,
1640 bool unaligned,
1641 bool mismatched,
1642 bool unsafe,
1643 int barrier_data) {
1655 if (unsafe) {
1656 st->as_Store()->set_unsafe_access();
1657 }
1658 st->as_Store()->set_barrier_data(barrier_data);
1659 st = _gvn.transform(st);
1660 set_memory(st, adr_idx);
1661 // Back-to-back stores can only remove intermediate store with DU info
1662 // so push on worklist for optimizer.
1663 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1664 record_for_igvn(st);
1665
1666 return st;
1667 }
1668
1669 Node* GraphKit::access_store_at(Node* obj,
1670 Node* adr,
1671 const TypePtr* adr_type,
1672 Node* val,
1673 const Type* val_type,
1674 BasicType bt,
1675 DecoratorSet decorators,
1676 bool safe_for_replace) {
1677 // Transformation of a value which could be null pointer (CastPP #null)
1678 // could be delayed during Parse (for example, in adjust_map_after_if()).
1679 // Execute transformation here to avoid barrier generation in such case.
1680 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1681 val = _gvn.makecon(TypePtr::NULL_PTR);
1682 }
1683
1684 if (stopped()) {
1685 return top(); // Dead path ?
1686 }
1687
1688 assert(val != nullptr, "not dead path");
1689 if (val->is_InlineType()) {
1690 // Store to non-flat field. Buffer the inline type and make sure
1691 // the store is re-executed if the allocation triggers deoptimization.
1692 PreserveReexecuteState preexecs(this);
1693 jvms()->set_should_reexecute(true);
1694 val = val->as_InlineType()->buffer(this, safe_for_replace);
1695 }
1696
1697 C2AccessValuePtr addr(adr, adr_type);
1698 C2AccessValue value(val, val_type);
1699 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1700 if (access.is_raw()) {
1701 return _barrier_set->BarrierSetC2::store_at(access, value);
1702 } else {
1703 return _barrier_set->store_at(access, value);
1704 }
1705 }
1706
1707 Node* GraphKit::access_load_at(Node* obj, // containing obj
1708 Node* adr, // actual address to store val at
1709 const TypePtr* adr_type,
1710 const Type* val_type,
1711 BasicType bt,
1712 DecoratorSet decorators,
1713 Node* ctl) {
1714 if (stopped()) {
1715 return top(); // Dead path ?
1716 }
1717
1718 C2AccessValuePtr addr(adr, adr_type);
1719 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1720 if (access.is_raw()) {
1721 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1722 } else {
1723 return _barrier_set->load_at(access, val_type);
1724 }
1725 }
1726
1727 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1728 const Type* val_type,
1729 BasicType bt,
1730 DecoratorSet decorators) {
1731 if (stopped()) {
1732 return top(); // Dead path ?
1733 }
1734
1735 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1736 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1737 if (access.is_raw()) {
1738 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1739 } else {
1804 Node* new_val,
1805 const Type* value_type,
1806 BasicType bt,
1807 DecoratorSet decorators) {
1808 C2AccessValuePtr addr(adr, adr_type);
1809 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1810 if (access.is_raw()) {
1811 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1812 } else {
1813 return _barrier_set->atomic_add_at(access, new_val, value_type);
1814 }
1815 }
1816
1817 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1818 return _barrier_set->clone(this, src, dst, size, is_array);
1819 }
1820
1821 //-------------------------array_element_address-------------------------
1822 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1823 const TypeInt* sizetype, Node* ctrl) {
1824 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
1825 uint shift = arytype->is_flat() ? arytype->flat_log_elem_size() : exact_log2(type2aelembytes(elembt));
1826 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1827
1828 // short-circuit a common case (saves lots of confusing waste motion)
1829 jint idx_con = find_int_con(idx, -1);
1830 if (idx_con >= 0) {
1831 intptr_t offset = header + ((intptr_t)idx_con << shift);
1832 return basic_plus_adr(ary, offset);
1833 }
1834
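  // In the variable-index case the address is ary + header + (idx << shift); the
  // index is first widened to machine-word size (conv_I2X_index) so the scaled
  // offset does not overflow int arithmetic on 64-bit platforms.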
1835 // must be correct type for alignment purposes
1836 Node* base = basic_plus_adr(ary, header);
1837 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1838 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1839 return basic_plus_adr(ary, base, scale);
1840 }
1841
1842 //-------------------------load_array_element-------------------------
1843 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1844 const Type* elemtype = arytype->elem();
1845 BasicType elembt = elemtype->array_element_basic_type();
1846 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1847 if (elembt == T_NARROWOOP) {
1848 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1849 }
1850 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1851 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1852 return ld;
1853 }
1854
1855 //-------------------------set_arguments_for_java_call-------------------------
1856 // Arguments (pre-popped from the stack) are taken from the JVMS.
1857 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1858 PreserveReexecuteState preexecs(this);
1859 if (EnableValhalla) {
1860 // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
1861 // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
1862 jvms()->set_should_reexecute(true);
1863 int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1864 inc_sp(arg_size);
1865 }
1866 // Add the call arguments
1867 const TypeTuple* domain = call->tf()->domain_sig();
1868 uint nargs = domain->cnt();
1869 int arg_num = 0;
1870 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1871 Node* arg = argument(i-TypeFunc::Parms);
1872 const Type* t = domain->field_at(i);
1873 // TODO 8284443 A static call to a mismatched method should still be scalarized
1874 if (t->is_inlinetypeptr() && !call->method()->get_Method()->mismatch() && call->method()->is_scalarized_arg(arg_num)) {
1875 // We don't pass inline type arguments by reference but instead pass each field of the inline type
1876 if (!arg->is_InlineType()) {
1877 assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
1878 arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
1879 }
1880 InlineTypeNode* vt = arg->as_InlineType();
1881 vt->pass_fields(this, call, idx, true, !t->maybe_null());
1882 // If an inline type argument is passed as fields, attach the Method* to the call site
1883 // to be able to access the extended signature later via attached_method_before_pc().
1884 // For example, see CompiledMethod::preserve_callee_argument_oops().
1885 call->set_override_symbolic_info(true);
1886 // Register an evol dependency on the callee method to make sure that this method is deoptimized and
1887 // re-compiled with a non-scalarized calling convention if the callee method is later marked as mismatched.
1888 C->dependencies()->assert_evol_method(call->method());
1889 arg_num++;
1890 continue;
1891 } else if (arg->is_InlineType()) {
1892 // Pass inline type argument via oop to callee
1893 arg = arg->as_InlineType()->buffer(this);
1894 }
1895 if (t != Type::HALF) {
1896 arg_num++;
1897 }
1898 call->init_req(idx++, arg);
1899 }
1900 }
1901
1902 //---------------------------set_edges_for_java_call---------------------------
1903 // Connect a newly created call into the current JVMS.
1904 // A return value node (if any) is returned from set_results_for_java_call.
1905 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1906
1907 // Add the predefined inputs:
1908 call->init_req( TypeFunc::Control, control() );
1909 call->init_req( TypeFunc::I_O , i_o() );
1910 call->init_req( TypeFunc::Memory , reset_memory() );
1911 call->init_req( TypeFunc::FramePtr, frameptr() );
1912 call->init_req( TypeFunc::ReturnAdr, top() );
1913
1914 add_safepoint_edges(call, must_throw);
1915
1916 Node* xcall = _gvn.transform(call);
1917
1918 if (xcall == top()) {
1919 set_control(top());
1920 return;
1921 }
1922 assert(xcall == call, "call identity is stable");
1923
1924 // Re-use the current map to produce the result.
1925
1926 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1927 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1928 set_all_memory_call(xcall, separate_io_proj);
1929
1930 //return xcall; // no need, caller already has it
1931 }
1932
1933 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1934 if (stopped()) return top(); // maybe the call folded up?
1935
1936 // Note: Since any out-of-line call can produce an exception,
1937 // we always insert an I_O projection from the call into the result.
1938
1939 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1940
1941 if (separate_io_proj) {
1942 // The caller requested separate projections be used by the fall
1943 // through and exceptional paths, so replace the projections for
1944 // the fall through path.
1945 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1946 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1947 }
1948
1949 // Capture the return value, if any.
1950 Node* ret;
1951 if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) {
1952 ret = top();
1953 } else if (call->tf()->returns_inline_type_as_fields()) {
1954 // Return of multiple values (inline type fields): we create an
1955 // InlineType node; each field is a projection from the call.
1956 ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
1957 uint base_input = TypeFunc::Parms;
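// The callee returns the inline type as a set of projections starting at
// TypeFunc::Parms; assemble them back into a single InlineTypeNode here.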
1958 ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, false);
1959 } else {
1960 ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1961 ciType* t = call->method()->return_type();
1962 if (t->is_klass()) {
1963 const Type* type = TypeOopPtr::make_from_klass(t->as_klass());
1964 if (type->is_inlinetypeptr()) {
1965 ret = InlineTypeNode::make_from_oop(this, ret, type->inline_klass(), type->inline_klass()->is_null_free());
1966 }
1967 }
1968 }
1969
1970 // We just called the constructor on an inline type receiver. Reload it from the buffer.
1971 if (call->method()->is_object_constructor() && call->method()->holder()->is_inlinetype()) {
1972 InlineTypeNode* receiver = call->in(TypeFunc::Parms)->as_InlineType();
1973 assert(receiver->is_larval(), "must be larval");
1974 assert(receiver->is_allocated(&gvn()), "larval must be buffered");
1975 InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, receiver->get_oop(), receiver->bottom_type()->inline_klass(), true);
1976 assert(!reloaded->is_larval(), "should not be larval anymore");
1977 replace_in_map(receiver, reloaded);
1978 }
1979
1980 return ret;
1981 }
1982
1983 //--------------------set_predefined_input_for_runtime_call--------------------
1984 // Reading and setting the memory state is way conservative here.
1985 // The real problem is that I am not doing real Type analysis on memory,
1986 // so I cannot distinguish card mark stores from other stores. Across a GC
1987 // point the Store Barrier and the card mark memory have to agree. I cannot
1988 // have a card mark store and its barrier split across the GC point from
1989 // either above or below. Here I get that to happen by reading ALL of memory.
1990 // A better answer would be to separate out card marks from other memory.
1991 // For now, return the input memory state, so that it can be reused
1992 // after the call, if this call has restricted memory effects.
1993 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1994 // Set fixed predefined input arguments
1995 Node* memory = reset_memory();
1996 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
1997 call->init_req( TypeFunc::Control, control() );
1998 call->init_req( TypeFunc::I_O, top() ); // does no i/o
1999 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
2050 if (use->is_MergeMem()) {
2051 wl.push(use);
2052 }
2053 }
2054 }
2055
2056 // Replace the call with the current state of the kit.
2057 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
2058 JVMState* ejvms = nullptr;
2059 if (has_exceptions()) {
2060 ejvms = transfer_exceptions_into_jvms();
2061 }
2062
2063 ReplacedNodes replaced_nodes = map()->replaced_nodes();
2064 ReplacedNodes replaced_nodes_exception;
2065 Node* ex_ctl = top();
2066
2067 SafePointNode* final_state = stop();
2068
2069 // Find all the needed outputs of this call
2070 CallProjections* callprojs = call->extract_projections(true);
2071
2072 Unique_Node_List wl;
2073 Node* init_mem = call->in(TypeFunc::Memory);
2074 Node* final_mem = final_state->in(TypeFunc::Memory);
2075 Node* final_ctl = final_state->in(TypeFunc::Control);
2076 Node* final_io = final_state->in(TypeFunc::I_O);
2077
2078 // Replace all the old call edges with the edges from the inlining result
2079 if (callprojs->fallthrough_catchproj != nullptr) {
2080 C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2081 }
2082 if (callprojs->fallthrough_memproj != nullptr) {
2083 if (final_mem->is_MergeMem()) {
2084 // The parser's exit MergeMem was not transformed but may still be optimized
2085 final_mem = _gvn.transform(final_mem);
2086 }
2087 C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
2088 add_mergemem_users_to_worklist(wl, final_mem);
2089 }
2090 if (callprojs->fallthrough_ioproj != nullptr) {
2091 C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
2092 }
2093
2094 // Replace the result with the new result if it exists and is used
2095 if (callprojs->resproj[0] != nullptr && result != nullptr) {
2096 // If the inlined code is dead, the result projections for an inline type returned as
2097 // fields have not been replaced. They will go away once the call is replaced by TOP below.
2098 assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
2099 "unexpected number of results");
2100 C->gvn_replace_by(callprojs->resproj[0], result);
2101 }
2102
2103 if (ejvms == nullptr) {
2104 // No exception edges, so simply kill off those paths
2105 if (callprojs->catchall_catchproj != nullptr) {
2106 C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2107 }
2108 if (callprojs->catchall_memproj != nullptr) {
2109 C->gvn_replace_by(callprojs->catchall_memproj, C->top());
2110 }
2111 if (callprojs->catchall_ioproj != nullptr) {
2112 C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
2113 }
2114 // Replace the old exception object with top
2115 if (callprojs->exobj != nullptr) {
2116 C->gvn_replace_by(callprojs->exobj, C->top());
2117 }
2118 } else {
2119 GraphKit ekit(ejvms);
2120
2121 // Load my combined exception state into the kit, with all phis transformed:
2122 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2123 replaced_nodes_exception = ex_map->replaced_nodes();
2124
2125 Node* ex_oop = ekit.use_exception_state(ex_map);
2126
2127 if (callprojs->catchall_catchproj != nullptr) {
2128 C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2129 ex_ctl = ekit.control();
2130 }
2131 if (callprojs->catchall_memproj != nullptr) {
2132 Node* ex_mem = ekit.reset_memory();
2133 C->gvn_replace_by(callprojs->catchall_memproj, ex_mem);
2134 add_mergemem_users_to_worklist(wl, ex_mem);
2135 }
2136 if (callprojs->catchall_ioproj != nullptr) {
2137 C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
2138 }
2139
2140 // Replace the old exception object with the newly created one
2141 if (callprojs->exobj != nullptr) {
2142 C->gvn_replace_by(callprojs->exobj, ex_oop);
2143 }
2144 }
2145
2146 // Disconnect the call from the graph
2147 call->disconnect_inputs(C);
2148 C->gvn_replace_by(call, C->top());
2149
2150 // Clean up any MergeMems that feed other MergeMems since the
2151 // optimizer doesn't like that.
2152 while (wl.size() > 0) {
2153 _gvn.transform(wl.pop());
2154 }
2155
2156 if (callprojs->fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2157 replaced_nodes.apply(C, final_ctl);
2158 }
2159 if (!ex_ctl->is_top() && do_replaced_nodes) {
2160 replaced_nodes_exception.apply(C, ex_ctl);
2161 }
2162 }
2163
2164
2165 //------------------------------increment_counter------------------------------
2166 // for statistics: increment a VM counter by 1
2167
2168 void GraphKit::increment_counter(address counter_addr) {
2169 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2170 increment_counter(adr1);
2171 }
2172
2173 void GraphKit::increment_counter(Node* counter_addr) {
2174 int adr_type = Compile::AliasIdxRaw;
2175 Node* ctrl = control();
2176 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
2335 *
2336 * @param n node that the type applies to
2337 * @param exact_kls type from profiling
2338 * @param ptr_kind did profiling see null?
2339 *
2340 * @return node with improved type
2341 */
2342 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2343 const Type* current_type = _gvn.type(n);
2344 assert(UseTypeSpeculation, "type speculation must be on");
2345
2346 const TypePtr* speculative = current_type->speculative();
2347
2348 // Should the klass from the profile be recorded in the speculative type?
2349 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2350 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2351 const TypeOopPtr* xtype = tklass->as_instance_type();
2352 assert(xtype->klass_is_exact(), "Should be exact");
2353 // Any reason to believe n is not null (from this profiling or a previous one)?
2354 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2355 const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2356 // record the new speculative type's depth
2357 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2358 speculative = speculative->with_inline_depth(jvms()->depth());
2359 } else if (current_type->would_improve_ptr(ptr_kind)) {
2360 // Profiling reports that null was never seen, so we can change the
2361 // speculative type to a non-null ptr.
2362 if (ptr_kind == ProfileAlwaysNull) {
2363 speculative = TypePtr::NULL_PTR;
2364 } else {
2365 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2366 const TypePtr* ptr = TypePtr::NOTNULL;
2367 if (speculative != nullptr) {
2368 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2369 } else {
2370 speculative = ptr;
2371 }
2372 }
2373 }
2374
2375 if (speculative != current_type->speculative()) {
2376 // Build a type with a speculative type (what we think we know
2377 // about the type but will need a guard when we use it)
2378 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
2379 // We're changing the type, we need a new CheckCast node to carry
2380 // the new type. The new type depends on the control: what
2381 // profiling tells us is only valid from here as far as we can
2382 // tell.
2383 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2384 cast = _gvn.transform(cast);
2385 replace_in_map(n, cast);
2386 n = cast;
2387 }
2388
2389 return n;
2390 }
2391
2392 /**
2393 * Record profiling data from receiver profiling at an invoke with the
2394 * type system so that it can propagate it (speculation)
2395 *
2396 * @param n receiver node
2397 *
2398 * @return node with improved type
2399 */
2400 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2401 if (!UseTypeSpeculation) {
2402 return n;
2403 }
2404 ciKlass* exact_kls = profile_has_unique_klass();
2405 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2406 if ((java_bc() == Bytecodes::_checkcast ||
2407 java_bc() == Bytecodes::_instanceof ||
2408 java_bc() == Bytecodes::_aastore) &&
2409 method()->method_data()->is_mature()) {
2410 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2411 if (data != nullptr) {
2412 if (java_bc() == Bytecodes::_aastore) {
2413 ciKlass* array_type = nullptr;
2414 ciKlass* element_type = nullptr;
2415 ProfilePtrKind element_ptr = ProfileMaybeNull;
2416 bool flat_array = true;
2417 bool null_free_array = true;
2418 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2419 exact_kls = element_type;
2420 ptr_kind = element_ptr;
2421 } else {
2422 if (!data->as_BitData()->null_seen()) {
2423 ptr_kind = ProfileNeverNull;
2424 } else {
2425 assert(data->is_ReceiverTypeData(), "bad profile data type");
2426 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2427 uint i = 0;
2428 for (; i < call->row_limit(); i++) {
2429 ciKlass* receiver = call->receiver(i);
2430 if (receiver != nullptr) {
2431 break;
2432 }
2433 }
2434 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2435 }
2436 }
2437 }
2438 }
2439 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2440 }
2441
2442 /**
2443 * Record profiling data from argument profiling at an invoke with the
2444 * type system so that it can propagate it (speculation)
2445 *
2446 * @param dest_method target method for the call
2447 * @param bc what invoke bytecode is this?
2448 */
2449 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2450 if (!UseTypeSpeculation) {
2451 return;
2452 }
2453 const TypeFunc* tf = TypeFunc::make(dest_method);
2454 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2455 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
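// 'j' walks all signature slots (skipping the receiver), while 'i' counts only the
// reference-typed arguments, which is how the argument profile data is indexed.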
2456 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2457 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2458 if (is_reference_type(targ->basic_type())) {
2459 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2460 ciKlass* better_type = nullptr;
2461 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2462 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2463 }
2464 i++;
2465 }
2466 }
2467 }
2468
2469 /**
2470 * Record profiling data from parameter profiling at an invoke with
2471 * the type system so that it can propagate it (speculation)
2472 */
2473 void GraphKit::record_profiled_parameters_for_speculation() {
2474 if (!UseTypeSpeculation) {
2475 return;
2476 }
2477 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2491 * the type system so that it can propagate it (speculation)
2492 */
2493 void GraphKit::record_profiled_return_for_speculation() {
2494 if (!UseTypeSpeculation) {
2495 return;
2496 }
2497 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2498 ciKlass* better_type = nullptr;
2499 if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2500 // If profiling reports a single type for the return value,
2501 // feed it to the type system so it can propagate it as a
2502 // speculative type
2503 record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2504 }
2505 }
2506
2507 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2508 if (Matcher::strict_fp_requires_explicit_rounding) {
2509 // (Note: TypeFunc::make has a cache that makes this fast.)
2510 const TypeFunc* tf = TypeFunc::make(dest_method);
2511 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2512 for (int j = 0; j < nargs; j++) {
2513 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2514 if (targ->basic_type() == T_DOUBLE) {
2515 // If any parameters are doubles, they must be rounded before
2516 // the call; dprecision_rounding does the gvn.transform.
2517 Node *arg = argument(j);
2518 arg = dprecision_rounding(arg);
2519 set_argument(j, arg);
2520 }
2521 }
2522 }
2523 }
2524
2525 // rounding for strict float precision conformance
2526 Node* GraphKit::precision_rounding(Node* n) {
2527 if (Matcher::strict_fp_requires_explicit_rounding) {
2528 #ifdef IA32
2529 if (UseSSE == 0) {
2530 return _gvn.transform(new RoundFloatNode(0, n));
2531 }
2532 #else
2533 Unimplemented();
2642 // The first null ends the list.
2643 Node* parm0, Node* parm1,
2644 Node* parm2, Node* parm3,
2645 Node* parm4, Node* parm5,
2646 Node* parm6, Node* parm7) {
2647 assert(call_addr != nullptr, "must not call null targets");
2648
2649 // Slow-path call
2650 bool is_leaf = !(flags & RC_NO_LEAF);
2651 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2652 if (call_name == nullptr) {
2653 assert(!is_leaf, "must supply name for leaf");
2654 call_name = OptoRuntime::stub_name(call_addr);
2655 }
2656 CallNode* call;
2657 if (!is_leaf) {
2658 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2659 } else if (flags & RC_NO_FP) {
2660 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2661 } else if (flags & RC_VECTOR){
2662 uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2663 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2664 } else {
2665 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2666 }
2667
2668 // The following is similar to set_edges_for_java_call,
2669 // except that the memory effects of the call are restricted to AliasIdxRaw.
2670
2671 // Slow path call has no side-effects, uses few values
2672 bool wide_in = !(flags & RC_NARROW_MEM);
2673 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2674
2675 Node* prev_mem = nullptr;
2676 if (wide_in) {
2677 prev_mem = set_predefined_input_for_runtime_call(call);
2678 } else {
2679 assert(!wide_out, "narrow in => narrow out");
2680 Node* narrow_mem = memory(adr_type);
2681 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2682 }
2722
2723 if (has_io) {
2724 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2725 }
2726 return call;
2727
2728 }
2729
2730 // i2b
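// Sign-extend the low 8 bits of 'in' to a full int by shifting left and then
// arithmetically shifting right, e.g. 0x000000FF becomes 0xFFFFFFFF (-1).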
2731 Node* GraphKit::sign_extend_byte(Node* in) {
2732 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2733 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2734 }
2735
2736 // i2s
2737 Node* GraphKit::sign_extend_short(Node* in) {
2738 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2739 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2740 }
2741
2742
2743 //------------------------------merge_memory-----------------------------------
2744 // Merge memory from one path into the current memory state.
2745 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2746 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2747 Node* old_slice = mms.force_memory();
2748 Node* new_slice = mms.memory2();
2749 if (old_slice != new_slice) {
2750 PhiNode* phi;
2751 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2752 if (mms.is_empty()) {
2753 // clone base memory Phi's inputs for this memory slice
2754 assert(old_slice == mms.base_memory(), "sanity");
2755 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2756 _gvn.set_type(phi, Type::MEMORY);
2757 for (uint i = 1; i < phi->req(); i++) {
2758 phi->init_req(i, old_slice->in(i));
2759 }
2760 } else {
2761 phi = old_slice->as_Phi(); // Phi was generated already
2762 }
2838
2839 // Fast check for identical types, perhaps identical constants.
2840 // The types can even be identical non-constants, in cases
2841 // involving Array.newInstance, Object.clone, etc.
2842 if (subklass == superklass)
2843 return C->top(); // false path is dead; no test needed.
2844
2845 if (gvn.type(superklass)->singleton()) {
2846 const TypeKlassPtr* superk = gvn.type(superklass)->is_klassptr();
2847 const TypeKlassPtr* subk = gvn.type(subklass)->is_klassptr();
2848
2849 // In the common case of an exact superklass, try to fold up the
2850 // test before generating code. You may ask, why not just generate
2851 // the code and then let it fold up? The answer is that the generated
2852 // code will necessarily include null checks, which do not always
2853 // completely fold away. If they are also needless, then they turn
2854 // into a performance loss. Example:
2855 // Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
2856 // Here, the type of 'fa' is often exact, so the store check
2857 // of fa[1]=x will fold up, without testing the nullness of x.
2858
2859 // At macro expansion, we would have already folded the SubTypeCheckNode
2860 // being expanded here because we always perform the static sub type
2861 // check in SubTypeCheckNode::sub() regardless of whether
2862 // StressReflectiveCode is set or not. We can therefore skip this
2863 // static check when StressReflectiveCode is on.
2864 switch (C->static_subtype_check(superk, subk)) {
2865 case Compile::SSC_always_false:
2866 {
2867 Node* always_fail = *ctrl;
2868 *ctrl = gvn.C->top();
2869 return always_fail;
2870 }
2871 case Compile::SSC_always_true:
2872 return C->top();
2873 case Compile::SSC_easy_test:
2874 {
2875 // Just do a direct pointer compare and be done.
2876 IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
2877 *ctrl = gvn.transform(new IfTrueNode(iff));
2878 return gvn.transform(new IfFalseNode(iff));
2879 }
2880 case Compile::SSC_full_test:
2881 break;
2882 default:
2883 ShouldNotReachHere();
3025
3026 // Now do a linear scan of the secondary super-klass array. Again, no real
3027 // performance impact (too rare) but it's gotta be done.
3028 // Since the code is rarely used, there is no penalty for moving it
3029 // out of line, and it can only improve I-cache density.
3030 // The decision to inline or out-of-line this final check is platform
3031 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
3032 Node* psc = gvn.transform(
3033 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
3034
3035 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
3036 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
3037 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
3038
3039 // Return false path; set default control to true path.
3040 *ctrl = gvn.transform(r_ok_subtype);
3041 return gvn.transform(r_not_subtype);
3042 }
3043
3044 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
3045 const Type* sub_t = _gvn.type(obj_or_subklass);
3046 if (sub_t->make_oopptr() != nullptr && sub_t->make_oopptr()->is_inlinetypeptr()) {
3047 sub_t = TypeKlassPtr::make(sub_t->inline_klass());
3048 obj_or_subklass = makecon(sub_t);
3049 }
3050 bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
3051 if (expand_subtype_check) {
3052 MergeMemNode* mem = merged_memory();
3053 Node* ctrl = control();
3054 Node* subklass = obj_or_subklass;
3055 if (!sub_t->isa_klassptr()) {
3056 subklass = load_object_klass(obj_or_subklass);
3057 }
3058
3059 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
3060 set_control(ctrl);
3061 return n;
3062 }
3063
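// Before the post-loop-opts phase, emit a SubTypeCheck node to be expanded later
// (at macro expansion). This keeps the graph compact during parsing and loop
// optimizations and lets the check be folded if the types become known.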
3064 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
3065 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
3066 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3067 set_control(_gvn.transform(new IfTrueNode(iff)));
3068 return _gvn.transform(new IfFalseNode(iff));
3069 }
3070
3071 // Profile-driven exact type check:
3072 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
3073 float prob, Node* *casted_receiver) {
3074 assert(!klass->is_interface(), "no exact type check on interfaces");
3075 Node* fail = top();
3076 const Type* rec_t = _gvn.type(receiver);
3077 if (rec_t->is_inlinetypeptr()) {
3078 if (klass->equals(rec_t->inline_klass())) {
3079 (*casted_receiver) = receiver; // Always passes
3080 } else {
3081 (*casted_receiver) = top(); // Always fails
3082 fail = control();
3083 set_control(top());
3084 }
3085 return fail;
3086 }
3087 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
3088 Node* recv_klass = load_object_klass(receiver);
3089 fail = type_check(recv_klass, tklass, prob);
3090
3091 if (!stopped()) {
3092 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3093 const TypeOopPtr* recv_xtype = tklass->as_instance_type();
3094 assert(recv_xtype->klass_is_exact(), "");
3095
3096 if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
3097 // Subsume downstream occurrences of receiver with a cast to
3098 // recv_xtype, since now we know what the type will be.
3099 Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
3100 Node* res = _gvn.transform(cast);
3101 if (recv_xtype->is_inlinetypeptr()) {
3102 assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
3103 res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass());
3104 }
3105 (*casted_receiver) = res;
3106 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
3107 // (User must make the replace_in_map call.)
3108 }
3109 }
3110
3111 return fail;
3112 }
3113
3114 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
3115 float prob) {
3116 Node* want_klass = makecon(tklass);
3117 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
3118 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3119 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
3120 set_control(_gvn.transform(new IfTrueNode (iff)));
3121 Node* fail = _gvn.transform(new IfFalseNode(iff));
3122 return fail;
3123 }
3124
3125 //------------------------------subtype_check_receiver-------------------------
3126 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3127 Node** casted_receiver) {
3128 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
3129 Node* want_klass = makecon(tklass);
3130
3131 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3132
3133 // Ignore interface type information until interface types are properly tracked.
3134 if (!stopped() && !klass->is_interface()) {
3135 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3136 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3137 if (receiver_type != nullptr && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3138 Node* cast = _gvn.transform(new CheckCastPPNode(control(), receiver, recv_type));
3139 if (recv_type->is_inlinetypeptr()) {
3140 cast = InlineTypeNode::make_from_oop(this, cast, recv_type->inline_klass());
3141 }
3142 (*casted_receiver) = cast;
3143 }
3144 }
3145
3146 return slow_ctl;
3147 }
3148
3149 //------------------------------seems_never_null-------------------------------
3150 // Use null_seen information if it is available from the profile.
3151 // If we see an unexpected null at a type check we record it and force a
3152 // recompile; the offending check will be recompiled to handle nulls.
3153 // If we see several offending BCIs, then all checks in the
3154 // method will be recompiled.
3155 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3156 speculating = !_gvn.type(obj)->speculative_maybe_null();
3157 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3158 if (UncommonNullCast // Cutout for this technique
3159 && obj != null() // And not the -Xcomp stupid case?
3160 && !too_many_traps(reason)
3161 ) {
3162 if (speculating) {
3231
3232 //------------------------maybe_cast_profiled_receiver-------------------------
3233 // If the profile has seen exactly one type, narrow to exactly that type.
3234 // Subsequent type checks will always fold up.
3235 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3236 const TypeKlassPtr* require_klass,
3237 ciKlass* spec_klass,
3238 bool safe_for_replace) {
3239 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3240
3241 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3242
3243 // Make sure we haven't already deoptimized from this tactic.
3244 if (too_many_traps_or_recompiles(reason))
3245 return nullptr;
3246
3247 // (No, this isn't a call, but it's enough like a virtual call
3248 // to use the same ciMethod accessor to get the profile info...)
3249 // If we have a speculative type use it instead of profiling (which
3250 // may not help us)
3251 ciKlass* exact_kls = spec_klass;
3252 if (exact_kls == nullptr) {
3253 if (java_bc() == Bytecodes::_aastore) {
3254 ciKlass* array_type = nullptr;
3255 ciKlass* element_type = nullptr;
3256 ProfilePtrKind element_ptr = ProfileMaybeNull;
3257 bool flat_array = true;
3258 bool null_free_array = true;
3259 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3260 exact_kls = element_type;
3261 } else {
3262 exact_kls = profile_has_unique_klass();
3263 }
3264 }
3265 if (exact_kls != nullptr) {// no cast failures here
3266 if (require_klass == nullptr ||
3267 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3268 // If we narrow the type to match what the type profile sees or
3269 // the speculative type, we can then remove the rest of the
3270 // cast.
3271 // This is a win, even if the exact_kls is very specific,
3272 // because downstream operations, such as method calls,
3273 // will often benefit from the sharper type.
3274 Node* exact_obj = not_null_obj; // will get updated in place...
3275 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3276 &exact_obj);
3277 { PreserveJVMState pjvms(this);
3278 set_control(slow_ctl);
3279 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3280 }
3281 if (safe_for_replace) {
3282 replace_in_map(not_null_obj, exact_obj);
3283 }
3284 return exact_obj;
3374 // If not_null_obj is dead, only null-path is taken
3375 if (stopped()) { // Doing instance-of on a null?
3376 set_control(null_ctl);
3377 return intcon(0);
3378 }
3379 region->init_req(_null_path, null_ctl);
3380 phi ->init_req(_null_path, intcon(0)); // Set null path value
3381 if (null_ctl == top()) {
3382 // Do this eagerly, so that pattern matches like is_diamond_phi
3383 // will work even during parsing.
3384 assert(_null_path == PATH_LIMIT-1, "delete last");
3385 region->del_req(_null_path);
3386 phi ->del_req(_null_path);
3387 }
3388
3389 // Do we know the type check always succeeds?
3390 bool known_statically = false;
3391 if (_gvn.type(superklass)->singleton()) {
3392 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3393 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3394 if (subk != nullptr && subk->is_loaded()) {
3395 int static_res = C->static_subtype_check(superk, subk);
3396 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3397 }
3398 }
3399
3400 if (!known_statically) {
3401 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3402 // We may not have profiling here or it may not help us. If we
3403 // have a speculative type use it to perform an exact cast.
3404 ciKlass* spec_obj_type = obj_type->speculative_type();
3405 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3406 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3407 if (stopped()) { // Profile disagrees with this path.
3408 set_control(null_ctl); // Null is the only remaining possibility.
3409 return intcon(0);
3410 }
3411 if (cast_obj != nullptr) {
3412 not_null_obj = cast_obj;
3413 }
3414 }
3430 record_for_igvn(region);
3431
3432 // If we know the type check always succeeds then we don't use the
3433 // profiling data at this bytecode. Don't lose it, feed it to the
3434 // type system as a speculative type.
3435 if (safe_for_replace) {
3436 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3437 replace_in_map(obj, casted_obj);
3438 }
3439
3440 return _gvn.transform(phi);
3441 }
3442
3443 //-------------------------------gen_checkcast---------------------------------
3444 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3445 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3446 // uncommon-trap paths work. Adjust stack after this call.
3447 // If failure_control is supplied and not null, it is filled in with
3448 // the control edge for the cast failure. Otherwise, an appropriate
3449 // uncommon trap or exception is thrown.
3450 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {
3451 kill_dead_locals(); // Benefit all the uncommon traps
3452 const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
3453 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
3454 const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
3455 bool safe_for_replace = (failure_control == nullptr);
3456 assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer");
3457
3458 // Fast cutout: Check the case that the cast is vacuously true.
3459 // This detects the common cases where the test will short-circuit
3460 // away completely. We do this before we perform the null check,
3461 // because if the test is going to turn into zero code, we don't
3462 // want a residual null check left around. (Causes a slowdown,
3463 // for example, in some objArray manipulations, such as a[i]=a[j].)
3464 if (improved_klass_ptr_type->singleton()) {
3465 const TypeKlassPtr* kptr = nullptr;
3466 const Type* t = _gvn.type(obj);
3467 if (t->isa_oop_ptr()) {
3468 kptr = t->is_oopptr()->as_klass_type();
3469 } else if (obj->is_InlineType()) {
3470 ciInlineKlass* vk = t->inline_klass();
3471 kptr = TypeInstKlassPtr::make(TypePtr::NotNull, vk, Type::Offset(0));
3472 }
3473 if (kptr != nullptr) {
3474 switch (C->static_subtype_check(improved_klass_ptr_type, kptr)) {
3475 case Compile::SSC_always_true:
3476 // If we know the type check always succeeds then we don't use
3477 // the profiling data at this bytecode. Don't lose it, feed it
3478 // to the type system as a speculative type.
3479 obj = record_profiled_receiver_for_speculation(obj);
3480 if (null_free) {
3481 assert(safe_for_replace, "must be");
3482 obj = null_check(obj);
3483 }
3484 assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineType(), "should have been scalarized");
3485 return obj;
3486 case Compile::SSC_always_false:
3487 if (null_free) {
3488 assert(safe_for_replace, "must be");
3489 obj = null_check(obj);
3490 }
3491 // It needs a null check because a null will *pass* the cast check.
3492 if (t->isa_oopptr() != nullptr && !t->is_oopptr()->maybe_null()) {
3493 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3494 Deoptimization::DeoptReason reason = is_aastore ?
3495 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3496 builtin_throw(reason);
3497 return top();
3498 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3499 return null_assert(obj);
3500 }
3501 break; // Fall through to full check
3502 default:
3503 break;
3504 }
3505 }
3506 }
3507
3508 ciProfileData* data = nullptr;
3509 if (failure_control == nullptr) { // use MDO in regular case only
3510 assert(java_bc() == Bytecodes::_aastore ||
3511 java_bc() == Bytecodes::_checkcast,
3512 "interpreter profiles type checks only for these BCs");
3513 if (method()->method_data()->is_mature()) {
3514 data = method()->method_data()->bci_to_data(bci());
3515 }
3516 }
3517
3518 // Make the merge point
3519 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3520 RegionNode* region = new RegionNode(PATH_LIMIT);
3521 Node* phi = new PhiNode(region, toop);
3522 _gvn.set_type(region, Type::CONTROL);
3523 _gvn.set_type(phi, toop);
3524
3525 C->set_has_split_ifs(true); // Has chance for split-if optimization
3526
3527 // Use null-cast information if it is available
3528 bool speculative_not_null = false;
3529 bool never_see_null = ((failure_control == nullptr) // regular case only
3530 && seems_never_null(obj, data, speculative_not_null));
3531
3532 if (obj->is_InlineType()) {
3533 // Re-execute if buffering triggers deoptimization
3534 PreserveReexecuteState preexecs(this);
3535 jvms()->set_should_reexecute(true);
3536 obj = obj->as_InlineType()->buffer(this, safe_for_replace);
3537 }
3538
3539 // Null check; get casted pointer; set region slot 3
3540 Node* null_ctl = top();
3541 Node* not_null_obj = nullptr;
3542 if (null_free) {
3543 assert(safe_for_replace, "must be");
3544 not_null_obj = null_check(obj);
3545 } else {
3546 not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3547 }
3548
3549 // If not_null_obj is dead, only null-path is taken
3550 if (stopped()) { // Doing instance-of on a null?
3551 set_control(null_ctl);
3552 if (toop->is_inlinetypeptr()) {
3553 return InlineTypeNode::make_null(_gvn, toop->inline_klass());
3554 }
3555 return null();
3556 }
3557 region->init_req(_null_path, null_ctl);
3558 phi ->init_req(_null_path, null()); // Set null path value
3559 if (null_ctl == top()) {
3560 // Do this eagerly, so that pattern matches like is_diamond_phi
3561 // will work even during parsing.
3562 assert(_null_path == PATH_LIMIT-1, "delete last");
3563 region->del_req(_null_path);
3564 phi ->del_req(_null_path);
3565 }
3566
3567 Node* cast_obj = nullptr;
3568 if (improved_klass_ptr_type->klass_is_exact()) {
3569 // The following optimization tries to statically cast the speculative type of the object
3570 // (for example obtained during profiling) to the type of the superklass and then do a
3571 // dynamic check that the type of the object is what we expect. To work correctly
3572 // for checkcast and aastore the type of superklass should be exact.
3573 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3574 // We may not have profiling here or it may not help us. If we have
3575 // a speculative type use it to perform an exact cast.
3576 ciKlass* spec_obj_type = obj_type->speculative_type();
3577 if (spec_obj_type != nullptr || data != nullptr) {
3578 cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace);
3579 if (cast_obj != nullptr) {
3580 if (failure_control != nullptr) // failure is now impossible
3581 (*failure_control) = top();
3582 // adjust the type of the phi to the exact klass:
3583 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3584 }
3585 }
3586 }
3587
3588 if (cast_obj == nullptr) {
3589 // Generate the subtype check
3590 Node* improved_superklass = superklass;
3591 if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
3592 // Only improve the super class for constants which allows subsequent sub type checks to possibly be commoned up.
3593 // The other non-constant cases cannot be improved with a cast node here since they could be folded to top.
3594 // Additionally, the benefit would only be minor in non-constant cases.
3595 improved_superklass = makecon(improved_klass_ptr_type);
3596 }
3597 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);
3598 // Plug in success path into the merge
3599 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3600 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3601 if (failure_control == nullptr) {
3602 if (not_subtype_ctrl != top()) { // If failure is possible
3603 PreserveJVMState pjvms(this);
3604 set_control(not_subtype_ctrl);
3605 Node* obj_klass = nullptr;
3606 if (not_null_obj->is_InlineType()) {
3607 obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3608 } else {
3609 obj_klass = load_object_klass(not_null_obj);
3610 }
3611 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3612 Deoptimization::DeoptReason reason = is_aastore ?
3613 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3614 builtin_throw(reason);
3615 }
3616 } else {
3617 (*failure_control) = not_subtype_ctrl;
3618 }
3619 }
3620
3621 region->init_req(_obj_path, control());
3622 phi ->init_req(_obj_path, cast_obj);
3623
3624 // A merge of null or Casted-NotNull obj
3625 Node* res = _gvn.transform(phi);
3626
3627 // Note I do NOT always 'replace_in_map(obj,result)' here.
3628 // if( tk->klass()->can_be_primary_super() )
3629 // This means that if I successfully store an Object into an array-of-String
3630 // I 'forget' that the Object is really now known to be a String. I have to
3631 // do this because we don't have true union types for interfaces - if I store
3632 // a Baz into an array-of-Interface and then tell the optimizer it's an
3633 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3634 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3635 // replace_in_map( obj, res );
3636
3637 // Return final merged results
3638 set_control( _gvn.transform(region) );
3639 record_for_igvn(region);
3640
3641 bool not_inline = !toop->can_be_inline_type();
3642 bool not_flat_in_array = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flat_in_array());
3643 if (EnableValhalla && not_flat_in_array) {
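// If the cast target cannot live in a flat (or null-free) array and 'obj' was
// loaded from an array, we can sharpen the array's type below (not flat /
// not null-free).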
3644 // Check if obj has been loaded from an array
3645 obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3646 Node* array = nullptr;
3647 if (obj->isa_Load()) {
3648 Node* address = obj->in(MemNode::Address);
3649 if (address->isa_AddP()) {
3650 array = address->as_AddP()->in(AddPNode::Base);
3651 }
3652 } else if (obj->is_Phi()) {
3653 Node* region = obj->in(0);
3654 // TODO make this more robust (see JDK-8231346)
3655 if (region->req() == 3 && region->in(2) != nullptr && region->in(2)->in(0) != nullptr) {
3656 IfNode* iff = region->in(2)->in(0)->isa_If();
3657 if (iff != nullptr) {
3658 iff->is_flat_array_check(&_gvn, &array);
3659 }
3660 }
3661 }
3662 if (array != nullptr) {
3663 const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3664 if (ary_t != nullptr && !ary_t->is_flat()) {
3665 if (!ary_t->is_not_null_free() && not_inline) {
3666 // Casting array element to a non-inline-type, mark array as not null-free.
3667 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3668 replace_in_map(array, cast);
3669 } else if (!ary_t->is_not_flat()) {
3670 // Casting array element to a non-flat type, mark array as not flat.
3671 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3672 replace_in_map(array, cast);
3673 }
3674 }
3675 }
3676 }
3677
3678 if (!stopped() && !res->is_InlineType()) {
3679 res = record_profiled_receiver_for_speculation(res);
3680 if (toop->is_inlinetypeptr()) {
3681 Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
3682 res = vt;
3683 if (safe_for_replace) {
3684 replace_in_map(obj, vt);
3685 replace_in_map(not_null_obj, vt);
3686 replace_in_map(res, vt);
3687 }
3688 }
3689 }
3690 return res;
3691 }
3692
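// Test mark word bits of 'obj' against 'mask_val'. The returned Bool is true when all
// mask bits are set (eq) or when they are not all set (ne). If 'check_lock' is set, the
// prototype header is loaded from the klass when the object is locked, since a locked
// object's mark word does not hold the original bits.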
3693 Node* GraphKit::mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock) {
3694 // Load markword
3695 Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3696 Node* mark = make_load(nullptr, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3697 if (check_lock) {
3698 // Check if obj is locked
3699 Node* locked_bit = MakeConX(markWord::unlocked_value);
3700 locked_bit = _gvn.transform(new AndXNode(locked_bit, mark));
3701 Node* cmp = _gvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
3702 Node* is_unlocked = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3703 IfNode* iff = new IfNode(control(), is_unlocked, PROB_MAX, COUNT_UNKNOWN);
3704 _gvn.transform(iff);
3705 Node* locked_region = new RegionNode(3);
3706 Node* mark_phi = new PhiNode(locked_region, TypeX_X);
3707
3708 // Unlocked: Use bits from mark word
3709 locked_region->init_req(1, _gvn.transform(new IfTrueNode(iff)));
3710 mark_phi->init_req(1, mark);
3711
3712 // Locked: Load prototype header from klass
3713 set_control(_gvn.transform(new IfFalseNode(iff)));
3714 // Make loads control dependent to make sure they are only executed if the object is locked
3715 Node* klass_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
3716 Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, control(), C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
3717 Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
3718 Node* proto = _gvn.transform(LoadNode::make(_gvn, control(), C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
3719
3720 locked_region->init_req(2, control());
3721 mark_phi->init_req(2, proto);
3722 set_control(_gvn.transform(locked_region));
3723 record_for_igvn(locked_region);
3724
3725 mark = mark_phi;
3726 }
3727
3728 // Now check if mark word bits are set
3729 Node* mask = MakeConX(mask_val);
3730 Node* masked = _gvn.transform(new AndXNode(_gvn.transform(mark), mask));
3731 record_for_igvn(masked); // Give it a chance to be optimized out by IGVN
3732 Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
3733 return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
3734 }
3735
3736 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
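// Inline type instances carry a dedicated bit pattern in the mark word
// (markWord::inline_type_pattern); the lock check is skipped here (check_lock = false).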
3737 return mark_word_test(obj, markWord::inline_type_pattern, is_inline, /* check_lock = */ false);
3738 }
3739
3740 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
3741 // We can't use immutable memory here because the mark word is mutable.
3742 // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
3743 // check is moved out of loops (mainly to enable loop unswitching).
3744 Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, memory(Compile::AliasIdxRaw), array_or_klass));
3745 record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
3746 return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
3747 }
3748
3749 Node* GraphKit::null_free_array_test(Node* array, bool null_free) {
3750 return mark_word_test(array, markWord::null_free_array_bit_in_place, null_free);
3751 }
3752
3753 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3754 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3755 RegionNode* region = new RegionNode(3);
3756 Node* null_ctl = top();
3757 null_check_oop(val, &null_ctl);
3758 if (null_ctl != top()) {
3759 PreserveJVMState pjvms(this);
3760 set_control(null_ctl);
3761 {
3762 // Deoptimize if null-free array
3763 BuildCutout unless(this, null_free_array_test(ary, /* null_free = */ false), PROB_MAX);
3764 inc_sp(nargs);
3765 uncommon_trap(Deoptimization::Reason_null_check,
3766 Deoptimization::Action_none);
3767 }
3768 region->init_req(1, control());
3769 }
3770 region->init_req(2, control());
3771 set_control(_gvn.transform(region));
3772 record_for_igvn(region);
3773 if (_gvn.type(val) == TypePtr::NULL_PTR) {
3774 // Since we just successfully stored null, the array can't be null-free.
3775 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3776 ary_t = ary_t->cast_to_not_null_free();
3777 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3778 if (safe_for_replace) {
3779 replace_in_map(ary, cast);
3780 }
3781 ary = cast;
3782 }
3783 return ary;
3784 }
3785
3786 //------------------------------next_monitor-----------------------------------
3787 // What number should be given to the next monitor?
3788 int GraphKit::next_monitor() {
3789 int current = jvms()->monitor_depth()* C->sync_stack_slots();
3790 int next = current + C->sync_stack_slots();
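// 'current' is the slot offset assigned to the new monitor; 'next' is only used to
// grow the frame's fixed-slot high water mark.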
3791 // Keep the toplevel high water mark current:
3792 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3793 return current;
3794 }
3795
3796 //------------------------------insert_mem_bar---------------------------------
3797 // Memory barrier to avoid floating things around
3798 // The membar serves as a pinch point between both control and all memory slices.
3799 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3800 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3801 mb->init_req(TypeFunc::Control, control());
3802 mb->init_req(TypeFunc::Memory, reset_memory());
3803 Node* membar = _gvn.transform(mb);
3831 }
3832 Node* membar = _gvn.transform(mb);
3833 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3834 if (alias_idx == Compile::AliasIdxBot) {
3835 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3836 } else {
3837 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3838 }
3839 return membar;
3840 }
3841
3842 //------------------------------shared_lock------------------------------------
3843 // Emit locking code.
3844 FastLockNode* GraphKit::shared_lock(Node* obj) {
3845 // bci is either a monitorenter bc or InvocationEntryBci
3846 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3847 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3848
3849 if( !GenerateSynchronizationCode )
3850 return nullptr; // Not locking things?
3851
3852 if (stopped()) // Dead monitor?
3853 return nullptr;
3854
3855 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3856
3857 // Box the stack location
3858 Node* box = new BoxLockNode(next_monitor());
3859 // Check for bailout after new BoxLockNode
3860 if (failing()) { return nullptr; }
3861 box = _gvn.transform(box);
3862 Node* mem = reset_memory();
3863
3864 FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3865
3866 // Create the rtm counters for this fast lock if needed.
3867 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3868
3869 // Add monitor to debug info for the slow path. If we block inside the
3870 // slow path and de-opt, we need the monitor hanging around
3871 map()->push_monitor( flock );
3903 }
3904 #endif
3905
3906 return flock;
3907 }
3908
3909
3910 //------------------------------shared_unlock----------------------------------
3911 // Emit unlocking code.
3912 void GraphKit::shared_unlock(Node* box, Node* obj) {
3913 // bci is either a monitorenter bc or InvocationEntryBci
3914 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3915 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3916
3917 if( !GenerateSynchronizationCode )
3918 return;
3919 if (stopped()) { // Dead monitor?
3920 map()->pop_monitor(); // Kill monitor from debug info
3921 return;
3922 }
3923 assert(!obj->is_InlineType(), "should not unlock on inline type");
3924
3925 // Memory barrier to avoid floating things down past the locked region
3926 insert_mem_bar(Op_MemBarReleaseLock);
3927
3928 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3929 UnlockNode *unlock = new UnlockNode(C, tf);
3930 #ifdef ASSERT
3931 unlock->set_dbg_jvms(sync_jvms());
3932 #endif
3933 uint raw_idx = Compile::AliasIdxRaw;
3934 unlock->init_req( TypeFunc::Control, control() );
3935 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3936 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3937 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3938 unlock->init_req( TypeFunc::ReturnAdr, top() );
3939
3940 unlock->init_req(TypeFunc::Parms + 0, obj);
3941 unlock->init_req(TypeFunc::Parms + 1, box);
3942 unlock = _gvn.transform(unlock)->as_Unlock();
3943
3944 Node* mem = reset_memory();
3945
3946 // unlock has no side-effects, sets few values
3947 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3948
3949 // Kill monitor from debug info
3950 map()->pop_monitor( );
3951 }
3952
3953 //-------------------------------get_layout_helper-----------------------------
3954 // If the given klass is a constant or known to be an array,
3955 // fetch the constant layout helper value into constant_value
3956 // and return null. Otherwise, load the non-constant
3957 // layout helper value, and return the node which represents it.
3958 // This two-faced routine is useful because allocation sites
3959 // almost always feature constant types.
3960 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3961 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3962 if (!StressReflectiveCode && klass_t != nullptr) {
3963 bool xklass = klass_t->klass_is_exact();
3964 bool can_be_flat = false;
3965 const TypeAryPtr* ary_type = klass_t->as_instance_type()->isa_aryptr();
3966 if (UseFlatArray && !xklass && ary_type != nullptr && !ary_type->is_null_free()) {
3967 // TODO 8325106 Fix comment
3968 // The runtime type of [LMyValue might be [QMyValue due to [QMyValue <: [LMyValue. Don't constant fold.
3969 const TypeOopPtr* elem = ary_type->elem()->make_oopptr();
3970 can_be_flat = ary_type->can_be_inline_array() && (!elem->is_inlinetypeptr() || elem->inline_klass()->flat_in_array());
3971 }
3972 if (!can_be_flat && (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM))) {
3973 jint lhelper;
3974 if (klass_t->is_flat()) {
3975 lhelper = ary_type->flat_layout_helper();
3976 } else if (klass_t->isa_aryklassptr()) {
3977 BasicType elem = ary_type->elem()->array_element_basic_type();
3978 if (is_reference_type(elem, true)) {
3979 elem = T_OBJECT;
3980 }
3981 lhelper = Klass::array_layout_helper(elem);
3982 } else {
3983 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
3984 }
3985 if (lhelper != Klass::_lh_neutral_value) {
3986 constant_value = lhelper;
3987 return (Node*) nullptr;
3988 }
3989 }
3990 }
3991 constant_value = Klass::_lh_neutral_value; // put in a known value
3992 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3993 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3994 }
3995
3996 // We just put in an allocate/initialize with a big raw-memory effect.
3997 // Hook selected additional alias categories on the initialization.
3998 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3999 MergeMemNode* init_in_merge,
4000 Node* init_out_raw) {
4001 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
4002 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
4003
4004 Node* prevmem = kit.memory(alias_idx);
4005 init_in_merge->set_memory_at(alias_idx, prevmem);
4006 if (init_out_raw != nullptr) {
4007 kit.set_memory(init_out_raw, alias_idx);
4008 }
4009 }
4010
4011 //---------------------------set_output_for_allocation-------------------------
4012 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
4013 const TypeOopPtr* oop_type,
4014 bool deoptimize_on_exception) {
4015 int rawidx = Compile::AliasIdxRaw;
4016 alloc->set_req( TypeFunc::FramePtr, frameptr() );
4017 add_safepoint_edges(alloc);
4018 Node* allocx = _gvn.transform(alloc);
4019 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
4020 // create memory projection for i_o
4021 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
4022 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
4023
4024 // create a memory projection as for the normal control path
4025 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
4026 set_memory(malloc, rawidx);
4027
4028 // a normal slow-call doesn't change i_o, but an allocation does
4029 // we create a separate i_o projection for the normal control path
4030 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
4031 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
4032
4033 // put in an initialization barrier
4034 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
4035 rawoop)->as_Initialize();
4036 assert(alloc->initialization() == init, "2-way macro link must work");
4037 assert(init ->allocation() == alloc, "2-way macro link must work");
4038 {
4039 // Extract memory strands which may participate in the new object's
4040 // initialization, and source them from the new InitializeNode.
4041 // This will allow us to observe initializations when they occur,
4042 // and link them properly (as a group) to the InitializeNode.
4043 assert(init->in(InitializeNode::Memory) == malloc, "");
4044 MergeMemNode* minit_in = MergeMemNode::make(malloc);
4045 init->set_req(InitializeNode::Memory, minit_in);
4046 record_for_igvn(minit_in); // fold it up later, if possible
4047 _gvn.set_type(minit_in, Type::MEMORY);
4048 Node* minit_out = memory(rawidx);
4049 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
4050 // Add an edge in the MergeMem for the header fields so an access
4051 // to one of those has correct memory state
4052 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
4053 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
4054 if (oop_type->isa_aryptr()) {
4055 const TypeAryPtr* arytype = oop_type->is_aryptr();
4056 if (arytype->is_flat()) {
4057 // Initially all flat array accesses share a single slice
4058 // but that changes after parsing. Prepare the memory graph so
4059 // it can optimize flat array accesses properly once they
4060 // don't share a single slice.
4061 assert(C->flat_accesses_share_alias(), "should be set at parse time");
4062 C->set_flat_accesses_share_alias(false);
4063 ciInlineKlass* vk = arytype->elem()->inline_klass();
4064 for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
4065 ciField* field = vk->nonstatic_field_at(i);
4066 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4067 continue; // do not bother to track really large numbers of fields
4068 int off_in_vt = field->offset_in_bytes() - vk->first_field_offset();
4069 const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
4070 int fieldidx = C->get_alias_index(adr_type, true);
4071 // Pass nullptr for init_out. Having per flat array element field memory edges as uses of the Initialize node
4072 // can result in per-flat-array-field Phis being created, which confuses the logic of
4073 // Compile::adjust_flat_array_access_aliases().
4074 hook_memory_on_init(*this, fieldidx, minit_in, nullptr);
4075 }
4076 C->set_flat_accesses_share_alias(true);
4077 hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
4078 } else {
4079 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
4080 int elemidx = C->get_alias_index(telemref);
4081 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
4082 }
4083 } else if (oop_type->isa_instptr()) {
4084 set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
4085 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
4086 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
4087 ciField* field = ik->nonstatic_field_at(i);
4088 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4089 continue; // do not bother to track really large numbers of fields
4090 // Find (or create) the alias category for this field:
4091 int fieldidx = C->alias_type(field)->index();
4092 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
4093 }
4094 }
4095 }
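// Net effect of the block above: the pre-allocation memory for every hooked slice now
// reaches the InitializeNode through minit_in, and (except for the flat-array field
// slices hooked with a null init_out) the kit's memory for those slices is the
// Initialize's raw projection minit_out.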
4096
4097 // Cast raw oop to the real thing...
4098 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
4099 javaoop = _gvn.transform(javaoop);
4100 C->set_recent_alloc(control(), javaoop);
4101 assert(just_allocated_object(control()) == javaoop, "just allocated");
4102
4103 #ifdef ASSERT
4104 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
4115 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
4116 }
4117 }
4118 #endif //ASSERT
4119
4120 return javaoop;
4121 }
4122
4123 //---------------------------new_instance--------------------------------------
4124 // This routine takes a klass_node which may be constant (for a static type)
4125 // or may be non-constant (for reflective code). It will work equally well
4126 // for either, and the graph will fold nicely if the optimizer later reduces
4127 // the type to a constant.
4128 // The optional arguments are for specialized use by intrinsics:
4129 // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
4130 // - If 'return_size_val' is not null, report the total object size to the caller.
4131 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
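// A minimal hypothetical use from an intrinsic, assuming a constant ciInstanceKlass* ik
// (the names here are illustrative, not taken from this file):
//   Node* kls = makecon(TypeKlassPtr::make(ik));
//   Node* obj = new_instance(kls);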
4132 Node* GraphKit::new_instance(Node* klass_node,
4133 Node* extra_slow_test,
4134 Node* *return_size_val,
4135 bool deoptimize_on_exception,
4136 InlineTypeNode* inline_type_node) {
4137 // Compute size in doublewords
4138 // The size is always an integral number of doublewords, represented
4139 // as a positive bytewise size stored in the klass's layout_helper.
4140 // The layout_helper also encodes (in a low bit) the need for a slow path.
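// (For an instance klass the layout_helper is simply the instance size in bytes, e.g.
// 0x18 for a 24-byte object; the low _lh_instance_slow_path_bit is set when the fast
// path cannot be used, e.g. for a finalizable class.)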
4141 jint layout_con = Klass::_lh_neutral_value;
4142 Node* layout_val = get_layout_helper(klass_node, layout_con);
4143 bool layout_is_con = (layout_val == nullptr);
4144
4145 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
4146 // Generate the initial go-slow test. It's either ALWAYS (return a
4147 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
4148 // case) a computed value derived from the layout_helper.
4149 Node* initial_slow_test = nullptr;
4150 if (layout_is_con) {
4151 assert(!StressReflectiveCode, "stress mode does not use these paths");
4152 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
4153 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
4154 } else { // reflective case
4155 // This reflective path is used by Unsafe.allocateInstance.
4156 // (It may be stress-tested by specifying StressReflectiveCode.)
4157 // Basically, we want to get into the VM if there's an illegal argument.
4158 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
4159 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
4160 if (extra_slow_test != intcon(0)) {
4161 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
4162 }
4163 // (Macro-expander will further convert this to a Bool, if necessary.)
4174
4175 // Clear the low bits to extract layout_helper_size_in_bytes:
4176 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
4177 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
4178 size = _gvn.transform( new AndXNode(size, mask) );
4179 }
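// Worked example: with LogBytesPerLong == 3 the mask is ~0x7, so a layout helper value
// of 0x19 (a 24-byte instance with the slow-path bit set) yields size == 0x18 == 24.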
4180 if (return_size_val != nullptr) {
4181 (*return_size_val) = size;
4182 }
4183
4184 // This is a precise notnull oop of the klass.
4185 // (Actually, it need not be precise if this is a reflective allocation.)
4186 // It's what we cast the result to.
4187 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
4188 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
4189 const TypeOopPtr* oop_type = tklass->as_instance_type();
4190
4191 // Now generate allocation code
4192
4193 // The entire memory state is needed for slow path of the allocation
4194 // since GC and deoptimization can happen.
4195 Node *mem = reset_memory();
4196 set_all_memory(mem); // Create new memory state
4197
4198 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
4199 control(), mem, i_o(),
4200 size, klass_node,
4201 initial_slow_test, inline_type_node);
4202
4203 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
4204 }
4205
4206 //-------------------------------new_array-------------------------------------
4207 // helper for newarray and anewarray
4208 // The 'length' parameter is (obviously) the length of the array.
4209 // The optional arguments are for specialized use by intrinsics:
4210 // - If 'return_size_val' is not null, report the non-padded array size (sum of header size
4211 // and array body) to the caller.
4212 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
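// A minimal hypothetical use, assuming a constant array klass 'array_klass' and an int
// length node 'len' (the names here are illustrative):
//   Node* arr = new_array(makecon(TypeKlassPtr::make(array_klass)), len, /*nargs=*/ 0);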
4213 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
4214 Node* length, // number of array elements
4215 int nargs, // number of arguments to push back for uncommon trap
4216 Node* *return_size_val,
4217 bool deoptimize_on_exception) {
4218 jint layout_con = Klass::_lh_neutral_value;
4219 Node* layout_val = get_layout_helper(klass_node, layout_con);
4220 bool layout_is_con = (layout_val == nullptr);
4221
4222 if (!layout_is_con && !StressReflectiveCode &&
4223 !too_many_traps(Deoptimization::Reason_class_check)) {
4224 // This is a reflective array creation site.
4225 // Optimistically assume that it is a subtype of Object[],
4226 // so that we can fold up all the address arithmetic.
4227 layout_con = Klass::array_layout_helper(T_OBJECT);
4228 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
4229 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
4230 { BuildCutout unless(this, bol_lh, PROB_MAX);
4231 inc_sp(nargs);
4232 uncommon_trap(Deoptimization::Reason_class_check,
4233 Deoptimization::Action_maybe_recompile);
4234 }
4235 layout_val = nullptr;
4236 layout_is_con = true;
4237 }
4238
4239 // Generate the initial go-slow test. Make sure we do not overflow
4240 // if length is huge (near 2Gig) or negative! We do not need
4241 // exact double-words here, just a close approximation of needed
4242 // double-words. We can't add any offset or rounding bits, lest we
4243 // take a size of -1 bytes and make it positive. Use an unsigned
4244 // compare, so negative sizes look hugely positive.
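// For example, a length of -1 looks like 0xFFFFFFFF to the unsigned compare below, far
// above any fast_size_limit, so the allocation is diverted to the slow path where the
// negative length is handled.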
4245 int fast_size_limit = FastAllocateSizeLimit;
4246 if (layout_is_con) {
4247 assert(!StressReflectiveCode, "stress mode does not use these paths");
4248 // Increase the size limit if we have exact knowledge of array type.
4249 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
4250 fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
4251 }
4252
4253 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
4254 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
4255
4256 // --- Size Computation ---
4257 // array_size = round_to_heap(array_header + (length << elem_shift));
4258 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
4259 // and align_to(x, y) == ((x + y-1) & ~(y-1))
4260 // The rounding mask is strength-reduced, if possible.
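// Illustrative numbers, assuming 8-byte MinObjAlignmentInBytes and a 16-byte array header:
//   byte[10]: 16 + (10 << 0) = 26, rounded up to 32
//   int[10]:  16 + (10 << 2) = 56, already aligned
//   long[10]: 16 + (10 << 3) = 96; with an element shift of 3 every size is 8-aligned,
//             so round_mask strength-reduces to 0 below.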
4261 int round_mask = MinObjAlignmentInBytes - 1;
4262 Node* header_size = nullptr;
4263 // (T_BYTE has the weakest alignment and size restrictions...)
4264 if (layout_is_con) {
4265 int hsize = Klass::layout_helper_header_size(layout_con);
4266 int eshift = Klass::layout_helper_log2_element_size(layout_con);
4267 bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
4268 if ((round_mask & ~right_n_bits(eshift)) == 0)
4269 round_mask = 0; // strength-reduce it if it goes away completely
4270 assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
4271 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
4272 assert(header_size_min <= hsize, "generic minimum is smallest");
4273 header_size = intcon(hsize);
4274 } else {
4275 Node* hss = intcon(Klass::_lh_header_size_shift);
4276 Node* hsm = intcon(Klass::_lh_header_size_mask);
4277 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
4278 header_size = _gvn.transform(new AndINode(header_size, hsm));
4279 }
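// (For arrays the layout_helper packs, roughly: the array tag in the top bits, the
// header size in bits [23:16], the element type in bits [15:8], and the log2 element
// size in the low bits; the shift/mask above extract the header size field.)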
4280
4281 Node* elem_shift = nullptr;
4282 if (layout_is_con) {
4283 int eshift = Klass::layout_helper_log2_element_size(layout_con);
4284 if (eshift != 0)
4285 elem_shift = intcon(eshift);
4286 } else {
4287 // There is no need to mask or shift this value.
4288 // The semantics of LShiftINode include an implicit mask to 0x1F.
4289 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
4290 elem_shift = layout_val;
4337 }
4338 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
4339
4340 if (return_size_val != nullptr) {
4341 // This is the non-padded size reported to the caller
4342 (*return_size_val) = non_rounded_size;
4343 }
4344
4345 Node* size = non_rounded_size;
4346 if (round_mask != 0) {
4347 Node* mask1 = MakeConX(round_mask);
4348 size = _gvn.transform(new AddXNode(size, mask1));
4349 Node* mask2 = MakeConX(~round_mask);
4350 size = _gvn.transform(new AndXNode(size, mask2));
4351 }
4352 // else if round_mask == 0, the size computation is self-rounding
4353
4354 // Now generate allocation code
4355
4356 // The entire memory state is needed for slow path of the allocation
4357 // since GC and deoptimization can happen.
4358 Node *mem = reset_memory();
4359 set_all_memory(mem); // Create new memory state
4360
4361 if (initial_slow_test->is_Bool()) {
4362 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
4363 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
4364 }
4365
4366 const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
4367 const TypeOopPtr* ary_type = ary_klass->as_instance_type();
4368 const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
4369
4370 // TODO 8325106 Fix comment
4371 // Inline type array variants:
4372 // - null-ok: MyValue.ref[] (ciObjArrayKlass "[LMyValue")
4373 // - null-free: MyValue.val[] (ciObjArrayKlass "[QMyValue")
4374 // - null-free, flat : MyValue.val[] (ciFlatArrayKlass "[QMyValue")
4375 // Check if array is a null-free, non-flat inline type array
4376 // that needs to be initialized with the default inline type.
4377 Node* default_value = nullptr;
4378 Node* raw_default_value = nullptr;
4379 if (ary_ptr != nullptr && ary_ptr->klass_is_exact()) {
4380 // Array type is known
4381 if (ary_ptr->is_null_free() && !ary_ptr->is_flat()) {
4382 ciInlineKlass* vk = ary_ptr->elem()->inline_klass();
4383 default_value = InlineTypeNode::default_oop(gvn(), vk);
4384 if (UseCompressedOops) {
4385 // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
4386 default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
4387 Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
4388 Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
4389 raw_default_value = _gvn.transform(new OrLNode(lower, upper));
4390 } else {
4391 raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
4392 }
4393 }
4394 }
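// Illustrative: if the compressed default oop were 0x00001234 (a hypothetical value),
// raw_default_value would be 0x0000123400001234, i.e. the narrow oop replicated into
// both 32-bit halves so a single 64-bit store can initialize two adjacent elements.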
4395
4396 Node* valid_length_test = _gvn.intcon(1);
4397 if (ary_type->isa_aryptr()) {
4398 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
4399 jint max = TypeAryPtr::max_array_length(bt);
4400 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
4401 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
4402 }
4403
4404 // Create the AllocateArrayNode and its result projections
4405 AllocateArrayNode* alloc
4406 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
4407 control(), mem, i_o(),
4408 size, klass_node,
4409 initial_slow_test,
4410 length, valid_length_test,
4411 default_value, raw_default_value);
4412 // Cast to correct type. Note that the klass_node may be constant or not,
4413 // and in the latter case the actual array type will be inexact also.
4414 // (This happens via a non-constant argument to inline_native_newArray.)
4415 // In any case, the value of klass_node provides the desired array type.
4416 const TypeInt* length_type = _gvn.find_int_type(length);
4417 if (ary_type->isa_aryptr() && length_type != nullptr) {
4418 // Try to get a better type than POS for the size
4419 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4420 }
4421
4422 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
4423
4424 array_ideal_length(alloc, ary_type, true);
4425 return javaoop;
4426 }
4427
4428 // The following "Ideal_foo" functions are placed here because they recognize
4429 // the graph shapes created by the functions immediately above.
4430
4431 //---------------------------Ideal_allocation----------------------------------
4538 set_all_memory(ideal.merged_memory());
4539 set_i_o(ideal.i_o());
4540 set_control(ideal.ctrl());
4541 }
4542
4543 void GraphKit::final_sync(IdealKit& ideal) {
4544 // Final sync IdealKit and graphKit.
4545 sync_kit(ideal);
4546 }
4547
4548 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4549 Node* len = load_array_length(load_String_value(str, set_ctrl));
4550 Node* coder = load_String_coder(str, set_ctrl);
4551 // Divide length by 2 if coder is UTF16
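// (CODER_LATIN1 == 0 and CODER_UTF16 == 1, so the shift leaves the byte length
// unchanged for Latin-1 strings and halves it for UTF-16 strings.)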
4552 return _gvn.transform(new RShiftINode(len, coder));
4553 }
4554
4555 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4556 int value_offset = java_lang_String::value_offset();
4557 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4558 false, nullptr, Type::Offset(0));
4559 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4560 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4561 TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, false, true, true),
4562 ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
4563 Node* p = basic_plus_adr(str, str, value_offset);
4564 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4565 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4566 return load;
4567 }
4568
4569 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4570 if (!CompactStrings) {
4571 return intcon(java_lang_String::CODER_UTF16);
4572 }
4573 int coder_offset = java_lang_String::coder_offset();
4574 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4575 false, nullptr, Type::Offset(0));
4576 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4577
4578 Node* p = basic_plus_adr(str, str, coder_offset);
4579 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4580 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4581 return load;
4582 }
4583
4584 void GraphKit::store_String_value(Node* str, Node* value) {
4585 int value_offset = java_lang_String::value_offset();
4586 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4587 false, nullptr, Type::Offset(0));
4588 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4589
4590 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4591 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4592 }
4593
4594 void GraphKit::store_String_coder(Node* str, Node* value) {
4595 int coder_offset = java_lang_String::coder_offset();
4596 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4597 false, nullptr, Type::Offset(0));
4598 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4599
4600 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4601 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4602 }
4603
4604 // Capture src and dst memory state with a MergeMemNode
4605 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4606 if (src_type == dst_type) {
4607 // Types are equal, we don't need a MergeMemNode
4608 return memory(src_type);
4609 }
4610 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4611 record_for_igvn(merge); // fold it up later, if possible
4612 int src_idx = C->get_alias_index(src_type);
4613 int dst_idx = C->get_alias_index(dst_type);
4614 merge->set_memory_at(src_idx, memory(src_idx));
4615 merge->set_memory_at(dst_idx, memory(dst_idx));
4616 return merge;
4617 }
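// The returned MergeMemNode sits on top of the map's base memory with the src and dst
// slices installed explicitly, so a single memory edge can convey the state of both
// slices to a consuming node.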
4690 i_char->init_req(2, AddI(i_char, intcon(2)));
4691
4692 set_control(IfFalse(iff));
4693 set_memory(st, TypeAryPtr::BYTES);
4694 }
4695
4696 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4697 if (!field->is_constant()) {
4698 return nullptr; // Field not marked as constant.
4699 }
4700 ciInstance* holder = nullptr;
4701 if (!field->is_static()) {
4702 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4703 if (const_oop != nullptr && const_oop->is_instance()) {
4704 holder = const_oop->as_instance();
4705 }
4706 }
4707 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4708 /*is_unsigned_load=*/false);
4709 if (con_type != nullptr) {
4710 Node* con = makecon(con_type);
4711 if (field->type()->is_inlinetype()) {
4712 con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
4713 } else if (con_type->is_inlinetypeptr()) {
4714 con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
4715 }
4716 return con;
4717 }
4718 return nullptr;
4719 }
4720
4721 //---------------------------load_mirror_from_klass----------------------------
4722 // Given a klass oop, load its java mirror (a java.lang.Class oop).
4723 Node* GraphKit::load_mirror_from_klass(Node* klass) {
4724 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
4725 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4726 // mirror = ((OopHandle)mirror)->resolve();
4727 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
4728 }