6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/ciUtilities.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "asm/register.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/c2/barrierSetC2.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/graphKit.hpp"
39 #include "opto/idealKit.hpp"
40 #include "opto/intrinsicnode.hpp"
41 #include "opto/locknode.hpp"
42 #include "opto/machnode.hpp"
43 #include "opto/opaquenode.hpp"
44 #include "opto/parse.hpp"
45 #include "opto/rootnode.hpp"
46 #include "opto/runtime.hpp"
47 #include "opto/subtypenode.hpp"
48 #include "runtime/deoptimization.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "utilities/bitMap.inline.hpp"
51 #include "utilities/powerOfTwo.hpp"
52 #include "utilities/growableArray.hpp"
53
54 //----------------------------GraphKit-----------------------------------------
55 // Main utility constructor.
56 GraphKit::GraphKit(JVMState* jvms)
57 : Phase(Phase::Parser),
58 _env(C->env()),
59 _gvn(*C->initial_gvn()),
60 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
61 {
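// Detach any pending exception states from the incoming map; the kit keeps
// them in _exceptions and manages them separately (see has_exceptions()).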
62 _exceptions = jvms->map()->next_exception();
63 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
64 set_jvms(jvms);
65 }
66
67 // Private constructor for parser.
68 GraphKit::GraphKit()
69 : Phase(Phase::Parser),
70 _env(C->env()),
71 _gvn(*C->initial_gvn()),
72 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
73 {
74 _exceptions = nullptr;
75 set_map(nullptr);
76 debug_only(_sp = -99);
77 debug_only(set_bci(-99));
78 }
79
80
81
82 //---------------------------clean_stack---------------------------------------
83 // Clear away rubbish from the stack area of the JVM state.
84 // This destroys any arguments that may be waiting on the stack.
840 if (PrintMiscellaneous && (Verbose || WizardMode)) {
841 tty->print_cr("Zombie local %d: ", local);
842 jvms->dump();
843 }
844 return false;
845 }
846 }
847 }
848 return true;
849 }
850
851 #endif //ASSERT
852
853 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
854 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
855 ciMethod* cur_method = jvms->method();
856 int cur_bci = jvms->bci();
857 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
858 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
859 return Interpreter::bytecode_should_reexecute(code) ||
860 (is_anewarray && code == Bytecodes::_multianewarray);
861 // Reexecute the _multianewarray bytecode, which was replaced with a
862 // sequence of [a]newarray bytecodes. See Parse::do_multianewarray().
863 //
864 // Note: the interpreter should not have this set, since the optimization
865 // is limited by dimensions and guarded by a flag, so in some cases
866 // multianewarray() runtime calls will be generated and
867 // the bytecode should not be reexecuted (the stack will not be reset).
868 } else {
869 return false;
870 }
871 }
872
873 // Helper function for adding JVMState and debug information to node
874 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
875 // Add the safepoint edges to the call (or other safepoint).
876
877 // Make sure dead locals are set to top. This
878 // should help register allocation time and cut down on the size
879 // of the deoptimization information.
880 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
931 }
932
933 // Presize the call:
934 DEBUG_ONLY(uint non_debug_edges = call->req());
935 call->add_req_batch(top(), youngest_jvms->debug_depth());
936 assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
937
938 // Set up edges so that the call looks like this:
939 // Call [state:] ctl io mem fptr retadr
940 // [parms:] parm0 ... parmN
941 // [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
942 // [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
943 // [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
944 // Note that caller debug info precedes callee debug info.
945
946 // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
947 uint debug_ptr = call->req();
948
949 // Loop over the map input edges associated with jvms, add them
950 // to the call node, & reset all offsets to match call node array.
951 for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
952 uint debug_end = debug_ptr;
953 uint debug_start = debug_ptr - in_jvms->debug_size();
954 debug_ptr = debug_start; // back up the ptr
955
956 uint p = debug_start; // walks forward in [debug_start, debug_end)
957 uint j, k, l;
958 SafePointNode* in_map = in_jvms->map();
959 out_jvms->set_map(call);
960
961 if (can_prune_locals) {
962 assert(in_jvms->method() == out_jvms->method(), "sanity");
963 // If the current throw can reach an exception handler in this JVMS,
964 // then we must keep everything live that can reach that handler.
965 // As a quick and dirty approximation, we look for any handlers at all.
966 if (in_jvms->method()->has_exception_handlers()) {
967 can_prune_locals = false;
968 }
969 }
970
971 // Add the Locals
972 k = in_jvms->locoff();
973 l = in_jvms->loc_size();
974 out_jvms->set_locoff(p);
975 if (!can_prune_locals) {
976 for (j = 0; j < l; j++)
977 call->set_req(p++, in_map->in(k+j));
978 } else {
979 p += l; // already set to top above by add_req_batch
980 }
981
982 // Add the Expression Stack
983 k = in_jvms->stkoff();
984 l = in_jvms->sp();
985 out_jvms->set_stkoff(p);
986 if (!can_prune_locals) {
987 for (j = 0; j < l; j++)
988 call->set_req(p++, in_map->in(k+j));
989 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
990 // Divide stack into {S0,...,S1}, where S0 is set to top.
991 uint s1 = stack_slots_not_pruned;
992 stack_slots_not_pruned = 0; // for next iteration
993 if (s1 > l) s1 = l;
994 uint s0 = l - s1;
995 p += s0; // skip the tops preinstalled by add_req_batch
996 for (j = s0; j < l; j++)
997 call->set_req(p++, in_map->in(k+j));
998 } else {
999 p += l; // already set to top above by add_req_batch
1000 }
1001
1002 // Add the Monitors
1003 k = in_jvms->monoff();
1004 l = in_jvms->mon_size();
1005 out_jvms->set_monoff(p);
1006 for (j = 0; j < l; j++)
1007 call->set_req(p++, in_map->in(k+j));
1008
1009 // Copy any scalar object fields.
1010 k = in_jvms->scloff();
1011 l = in_jvms->scl_size();
1012 out_jvms->set_scloff(p);
1013 for (j = 0; j < l; j++)
1014 call->set_req(p++, in_map->in(k+j));
1015
1016 // Finish the new jvms.
1017 out_jvms->set_endoff(p);
1018
1019 assert(out_jvms->endoff() == debug_end, "fill ptr must match");
1020 assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
1021 assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
1022 assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
1023 assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
1024 assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
1025
1026 // Update the two tail pointers in parallel.
1027 out_jvms = out_jvms->caller();
1028 in_jvms = in_jvms->caller();
1029 }
1030
1031 assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1032
1033 // Test the correctness of JVMState::debug_xxx accessors:
1034 assert(call->jvms()->debug_start() == non_debug_edges, "");
1035 assert(call->jvms()->debug_end() == call->req(), "");
1036 assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1037 }
1038
1039 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
1040 Bytecodes::Code code = java_bc();
1041 if (code == Bytecodes::_wide) {
1042 code = method()->java_code_at_bci(bci() + 1);
1043 }
1044
1045 if (code != Bytecodes::_illegal) {
1046 depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1182 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1183 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1184 return _gvn.transform( new AndLNode(conv, mask) );
1185 }
1186
1187 Node* GraphKit::ConvL2I(Node* offset) {
1188 // short-circuit a common case
1189 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1190 if (offset_con != (jlong)Type::OffsetBot) {
1191 return intcon((int) offset_con);
1192 }
1193 return _gvn.transform( new ConvL2INode(offset));
1194 }
1195
1196 //-------------------------load_object_klass-----------------------------------
1197 Node* GraphKit::load_object_klass(Node* obj) {
1198 // Special-case a fresh allocation to avoid building nodes:
1199 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1200 if (akls != nullptr) return akls;
1201 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1202 return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS));
1203 }
1204
1205 //-------------------------load_array_length-----------------------------------
1206 Node* GraphKit::load_array_length(Node* array) {
1207 // Special-case a fresh allocation to avoid building nodes:
1208 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1209 Node *alen;
1210 if (alloc == nullptr) {
1211 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1212 alen = _gvn.transform( new LoadRangeNode(nullptr, immutable_memory(), r_adr, TypeInt::POS));
1213 } else {
1214 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1215 }
1216 return alen;
1217 }
1218
1219 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1220 const TypeOopPtr* oop_type,
1221 bool replace_length_in_map) {
1222 Node* length = alloc->Ideal_length();
1231 replace_in_map(length, ccast);
1232 }
1233 return ccast;
1234 }
1235 }
1236 return length;
1237 }
1238
1239 //------------------------------do_null_check----------------------------------
1240 // Helper function to do a null pointer check. Returned value is
1241 // the incoming address with null casted away. You are allowed to use the
1242 // not-null value only if you are control dependent on the test.
1243 #ifndef PRODUCT
1244 extern uint explicit_null_checks_inserted,
1245 explicit_null_checks_elided;
1246 #endif
1247 Node* GraphKit::null_check_common(Node* value, BasicType type,
1248 // optional arguments for variations:
1249 bool assert_null,
1250 Node* *null_control,
1251 bool speculative) {
1252 assert(!assert_null || null_control == nullptr, "not both at once");
1253 if (stopped()) return top();
1254 NOT_PRODUCT(explicit_null_checks_inserted++);
1255
1256 // Construct null check
1257 Node *chk = nullptr;
1258 switch(type) {
1259 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1260 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1261 case T_ARRAY : // fall through
1262 type = T_OBJECT; // simplify further tests
1263 case T_OBJECT : {
1264 const Type *t = _gvn.type( value );
1265
1266 const TypeOopPtr* tp = t->isa_oopptr();
1267 if (tp != nullptr && !tp->is_loaded()
1268 // Only for do_null_check, not any of its siblings:
1269 && !assert_null && null_control == nullptr) {
1270 // Usually, any field access or invocation on an unloaded oop type
1271 // will simply fail to link, since the statically linked class is
1272 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1273 // the static class is loaded but the sharper oop type is not.
1274 // Rather than checking for this obscure case in lots of places,
1275 // we simply observe that a null check on an unloaded class
1339 }
1340 Node *oldcontrol = control();
1341 set_control(cfg);
1342 Node *res = cast_not_null(value);
1343 set_control(oldcontrol);
1344 NOT_PRODUCT(explicit_null_checks_elided++);
1345 return res;
1346 }
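// No equivalent test found here; keep searching up the (linear-only)
// dominator chain for a null check we can reuse.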
1347 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1348 if (cfg == nullptr) break; // Quit at region nodes
1349 depth++;
1350 }
1351 }
1352
1353 //-----------
1354 // Branch to failure if null
1355 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1356 Deoptimization::DeoptReason reason;
1357 if (assert_null) {
1358 reason = Deoptimization::reason_null_assert(speculative);
1359 } else if (type == T_OBJECT) {
1360 reason = Deoptimization::reason_null_check(speculative);
1361 } else {
1362 reason = Deoptimization::Reason_div0_check;
1363 }
1364 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1365 // ciMethodData::has_trap_at will return a conservative -1 if any
1366 // must-be-null assertion has failed. This could cause performance
1367 // problems for a method after its first do_null_assert failure.
1368 // Consider using 'Reason_class_check' instead?
1369
1370 // To cause an implicit null check, we set the not-null probability
1371 // to the maximum (PROB_MAX). For an explicit check the probability
1372 // is set to a smaller value.
1373 if (null_control != nullptr || too_many_traps(reason)) {
1374 // probability is less likely
1375 ok_prob = PROB_LIKELY_MAG(3);
1376 } else if (!assert_null &&
1377 (ImplicitNullCheckThreshold > 0) &&
1378 method() != nullptr &&
1379 (method()->method_data()->trap_count(reason)
1413 }
1414
1415 if (assert_null) {
1416 // Cast obj to null on this path.
1417 replace_in_map(value, zerocon(type));
1418 return zerocon(type);
1419 }
1420
1421 // Cast obj to not-null on this path, if there is no null_control.
1422 // (If there is a null_control, a non-null value may come back to haunt us.)
1423 if (type == T_OBJECT) {
1424 Node* cast = cast_not_null(value, false);
1425 if (null_control == nullptr || (*null_control) == top())
1426 replace_in_map(value, cast);
1427 value = cast;
1428 }
1429
1430 return value;
1431 }
1432
1433
1434 //------------------------------cast_not_null----------------------------------
1435 // Cast obj to not-null on this path
1436 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1437 const Type *t = _gvn.type(obj);
1438 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1439 // Object is already not-null?
1440 if( t == t_not_null ) return obj;
1441
1442 Node* cast = new CastPPNode(control(), obj,t_not_null);
1443 cast = _gvn.transform( cast );
1444
1445 // Scan for instances of 'obj' in the current JVM mapping.
1446 // These instances are known to be not-null after the test.
1447 if (do_replace_in_map)
1448 replace_in_map(obj, cast);
1449
1450 return cast; // Return casted value
1451 }
1452
1453 // Sometimes in intrinsics, we implicitly know an object is not null
1454 // (there's no actual null check) so we can cast it to not null. In
1455 // the course of optimizations, the input to the cast can become null.
1456 // In that case that data path will die and we need the control path
1545 // These are layered on top of the factory methods in LoadNode and StoreNode,
1546 // and integrate with the parser's memory state and _gvn engine.
1547 //
1548
1549 // factory methods in "int adr_idx"
1550 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1551 int adr_idx,
1552 MemNode::MemOrd mo,
1553 LoadNode::ControlDependency control_dependency,
1554 bool require_atomic_access,
1555 bool unaligned,
1556 bool mismatched,
1557 bool unsafe,
1558 uint8_t barrier_data) {
1559 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1560 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1561 debug_only(adr_type = C->get_adr_type(adr_idx));
1562 Node* mem = memory(adr_idx);
1563 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1564 ld = _gvn.transform(ld);
1565 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1566 // Improve graph before escape analysis and boxing elimination.
1567 record_for_igvn(ld);
1568 if (ld->is_DecodeN()) {
1569 // Also record the actual load (LoadN) in case ld is DecodeN. In some
1570 // rare corner cases, ld->in(1) can be something other than LoadN (e.g.,
1571 // a Phi). Recording such cases is still perfectly sound, but may be
1572 // unnecessary and result in some minor IGVN overhead.
1573 record_for_igvn(ld->in(1));
1574 }
1575 }
1576 return ld;
1577 }
1578
1579 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1580 int adr_idx,
1581 MemNode::MemOrd mo,
1582 bool require_atomic_access,
1583 bool unaligned,
1584 bool mismatched,
1598 if (unsafe) {
1599 st->as_Store()->set_unsafe_access();
1600 }
1601 st->as_Store()->set_barrier_data(barrier_data);
1602 st = _gvn.transform(st);
1603 set_memory(st, adr_idx);
1604 // Back-to-back stores can only remove an intermediate store with DU info,
1605 // so push the store on the worklist for the optimizer.
1606 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1607 record_for_igvn(st);
1608
1609 return st;
1610 }
1611
1612 Node* GraphKit::access_store_at(Node* obj,
1613 Node* adr,
1614 const TypePtr* adr_type,
1615 Node* val,
1616 const Type* val_type,
1617 BasicType bt,
1618 DecoratorSet decorators) {
1619 // Transformation of a value which could be a null pointer (CastPP #null)
1620 // could be delayed during Parse (for example, in adjust_map_after_if()).
1621 // Execute the transformation here to avoid barrier generation in such a case.
1622 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1623 val = _gvn.makecon(TypePtr::NULL_PTR);
1624 }
1625
1626 if (stopped()) {
1627 return top(); // Dead path ?
1628 }
1629
1630 assert(val != nullptr, "not dead path");
1631
1632 C2AccessValuePtr addr(adr, adr_type);
1633 C2AccessValue value(val, val_type);
1634 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
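// Raw accesses need no GC barriers, so bind statically to the base
// BarrierSetC2 implementation; otherwise dispatch virtually so the active
// barrier set (e.g. G1, Z) can decorate the access as needed.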
1635 if (access.is_raw()) {
1636 return _barrier_set->BarrierSetC2::store_at(access, value);
1637 } else {
1638 return _barrier_set->store_at(access, value);
1639 }
1640 }
1641
1642 Node* GraphKit::access_load_at(Node* obj, // containing obj
1643 Node* adr, // actual address to load val from
1644 const TypePtr* adr_type,
1645 const Type* val_type,
1646 BasicType bt,
1647 DecoratorSet decorators) {
1648 if (stopped()) {
1649 return top(); // Dead path ?
1650 }
1651
1652 C2AccessValuePtr addr(adr, adr_type);
1653 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
1654 if (access.is_raw()) {
1655 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1656 } else {
1657 return _barrier_set->load_at(access, val_type);
1658 }
1659 }
1660
1661 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1662 const Type* val_type,
1663 BasicType bt,
1664 DecoratorSet decorators) {
1665 if (stopped()) {
1666 return top(); // Dead path ?
1667 }
1668
1669 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1670 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1671 if (access.is_raw()) {
1672 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1673 } else {
1738 Node* new_val,
1739 const Type* value_type,
1740 BasicType bt,
1741 DecoratorSet decorators) {
1742 C2AccessValuePtr addr(adr, adr_type);
1743 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1744 if (access.is_raw()) {
1745 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1746 } else {
1747 return _barrier_set->atomic_add_at(access, new_val, value_type);
1748 }
1749 }
1750
1751 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1752 return _barrier_set->clone(this, src, dst, size, is_array);
1753 }
1754
1755 //-------------------------array_element_address-------------------------
1756 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1757 const TypeInt* sizetype, Node* ctrl) {
1758 uint shift = exact_log2(type2aelembytes(elembt));
1759 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
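// The element address is ary + header + (idx << shift): the array base
// offset for this element type plus the scaled index.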
1760
1761 // short-circuit a common case (saves lots of confusing waste motion)
1762 jint idx_con = find_int_con(idx, -1);
1763 if (idx_con >= 0) {
1764 intptr_t offset = header + ((intptr_t)idx_con << shift);
1765 return basic_plus_adr(ary, offset);
1766 }
1767
1768 // must be correct type for alignment purposes
1769 Node* base = basic_plus_adr(ary, header);
1770 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1771 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1772 return basic_plus_adr(ary, base, scale);
1773 }
1774
1775 //-------------------------load_array_element-------------------------
1776 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1777 const Type* elemtype = arytype->elem();
1778 BasicType elembt = elemtype->array_element_basic_type();
1779 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1780 if (elembt == T_NARROWOOP) {
1781 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1782 }
1783 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1784 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1785 return ld;
1786 }
1787
1788 //-------------------------set_arguments_for_java_call-------------------------
1789 // Arguments (pre-popped from the stack) are taken from the JVMS.
1790 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1791 // Add the call arguments:
1792 uint nargs = call->method()->arg_size();
1793 for (uint i = 0; i < nargs; i++) {
1794 Node* arg = argument(i);
1795 call->init_req(i + TypeFunc::Parms, arg);
1796 }
1797 }
1798
1799 //---------------------------set_edges_for_java_call---------------------------
1800 // Connect a newly created call into the current JVMS.
1801 // A return-value projection (if any) is created later by set_results_for_java_call.
1802 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1803
1804 // Add the predefined inputs:
1805 call->init_req( TypeFunc::Control, control() );
1806 call->init_req( TypeFunc::I_O , i_o() );
1807 call->init_req( TypeFunc::Memory , reset_memory() );
1808 call->init_req( TypeFunc::FramePtr, frameptr() );
1809 call->init_req( TypeFunc::ReturnAdr, top() );
1810
1811 add_safepoint_edges(call, must_throw);
1812
1813 Node* xcall = _gvn.transform(call);
1814
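// GVN may fold the entire call to top (e.g. if its control is already dead);
// in that case kill the current control and bail out.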
1815 if (xcall == top()) {
1816 set_control(top());
1817 return;
1818 }
1819 assert(xcall == call, "call identity is stable");
1820
1821 // Re-use the current map to produce the result.
1822
1823 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1824 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1825 set_all_memory_call(xcall, separate_io_proj);
1826
1827 //return xcall; // no need, caller already has it
1828 }
1829
1830 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1831 if (stopped()) return top(); // maybe the call folded up?
1832
1833 // Capture the return value, if any.
1834 Node* ret;
1835 if (call->method() == nullptr ||
1836 call->method()->return_type()->basic_type() == T_VOID)
1837 ret = top();
1838 else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1839
1840 // Note: Since any out-of-line call can produce an exception,
1841 // we always insert an I_O projection from the call into the result.
1842
1843 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1844
1845 if (separate_io_proj) {
1846 // The caller requested separate projections be used by the fall
1847 // through and exceptional paths, so replace the projections for
1848 // the fall through path.
1849 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1850 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1851 }
1852 return ret;
1853 }
1854
1855 //--------------------set_predefined_input_for_runtime_call--------------------
1856 // Reading and setting the memory state is way conservative here.
1857 // The real problem is that I am not doing real Type analysis on memory,
1858 // so I cannot distinguish card mark stores from other stores. Across a GC
1859 // point the Store Barrier and the card mark memory has to agree. I cannot
1860 // have a card mark store and its barrier split across the GC point from
1861 // either above or below. Here I get that to happen by reading ALL of memory.
1862 // A better answer would be to separate out card marks from other memory.
1863 // For now, return the input memory state, so that it can be reused
1864 // after the call, if this call has restricted memory effects.
1865 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1866 // Set fixed predefined input arguments
1867 Node* memory = reset_memory();
1868 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
1869 call->init_req( TypeFunc::Control, control() );
1870 call->init_req( TypeFunc::I_O, top() ); // does no i/o
1871 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
1922 if (use->is_MergeMem()) {
1923 wl.push(use);
1924 }
1925 }
1926 }
1927
1928 // Replace the call with the current state of the kit.
1929 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
1930 JVMState* ejvms = nullptr;
1931 if (has_exceptions()) {
1932 ejvms = transfer_exceptions_into_jvms();
1933 }
1934
1935 ReplacedNodes replaced_nodes = map()->replaced_nodes();
1936 ReplacedNodes replaced_nodes_exception;
1937 Node* ex_ctl = top();
1938
1939 SafePointNode* final_state = stop();
1940
1941 // Find all the needed outputs of this call
1942 CallProjections callprojs;
1943 call->extract_projections(&callprojs, true);
1944
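// Worklist of MergeMems touched by the replacements below; they are
// re-transformed at the end so that no MergeMem feeds another MergeMem.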
1945 Unique_Node_List wl;
1946 Node* init_mem = call->in(TypeFunc::Memory);
1947 Node* final_mem = final_state->in(TypeFunc::Memory);
1948 Node* final_ctl = final_state->in(TypeFunc::Control);
1949 Node* final_io = final_state->in(TypeFunc::I_O);
1950
1951 // Replace all the old call edges with the edges from the inlining result
1952 if (callprojs.fallthrough_catchproj != nullptr) {
1953 C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
1954 }
1955 if (callprojs.fallthrough_memproj != nullptr) {
1956 if (final_mem->is_MergeMem()) {
1957 // The parser's exit MergeMem was not transformed but may be optimized
1958 final_mem = _gvn.transform(final_mem);
1959 }
1960 C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
1961 add_mergemem_users_to_worklist(wl, final_mem);
1962 }
1963 if (callprojs.fallthrough_ioproj != nullptr) {
1964 C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
1965 }
1966
1967 // Replace the result with the new result if it exists and is used
1968 if (callprojs.resproj != nullptr && result != nullptr) {
1969 C->gvn_replace_by(callprojs.resproj, result);
1970 }
1971
1972 if (ejvms == nullptr) {
1973 // No exception states from the inlined body, so simply kill off those paths
1974 if (callprojs.catchall_catchproj != nullptr) {
1975 C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
1976 }
1977 if (callprojs.catchall_memproj != nullptr) {
1978 C->gvn_replace_by(callprojs.catchall_memproj, C->top());
1979 }
1980 if (callprojs.catchall_ioproj != nullptr) {
1981 C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
1982 }
1983 // Replace the old exception object with top
1984 if (callprojs.exobj != nullptr) {
1985 C->gvn_replace_by(callprojs.exobj, C->top());
1986 }
1987 } else {
1988 GraphKit ekit(ejvms);
1989
1990 // Load my combined exception state into the kit, with all phis transformed:
1991 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
1992 replaced_nodes_exception = ex_map->replaced_nodes();
1993
1994 Node* ex_oop = ekit.use_exception_state(ex_map);
1995
1996 if (callprojs.catchall_catchproj != nullptr) {
1997 C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
1998 ex_ctl = ekit.control();
1999 }
2000 if (callprojs.catchall_memproj != nullptr) {
2001 Node* ex_mem = ekit.reset_memory();
2002 C->gvn_replace_by(callprojs.catchall_memproj, ex_mem);
2003 add_mergemem_users_to_worklist(wl, ex_mem);
2004 }
2005 if (callprojs.catchall_ioproj != nullptr) {
2006 C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
2007 }
2008
2009 // Replace the old exception object with the newly created one
2010 if (callprojs.exobj != nullptr) {
2011 C->gvn_replace_by(callprojs.exobj, ex_oop);
2012 }
2013 }
2014
2015 // Disconnect the call from the graph
2016 call->disconnect_inputs(C);
2017 C->gvn_replace_by(call, C->top());
2018
2019 // Clean up any MergeMems that feed other MergeMems since the
2020 // optimizer doesn't like that.
2021 while (wl.size() > 0) {
2022 _gvn.transform(wl.pop());
2023 }
2024
2025 if (callprojs.fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2026 replaced_nodes.apply(C, final_ctl);
2027 }
2028 if (!ex_ctl->is_top() && do_replaced_nodes) {
2029 replaced_nodes_exception.apply(C, ex_ctl);
2030 }
2031 }
2032
2033
2034 //------------------------------increment_counter------------------------------
2035 // for statistics: increment a VM counter by 1
2036
2037 void GraphKit::increment_counter(address counter_addr) {
2038 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2039 increment_counter(adr1);
2040 }
2041
2042 void GraphKit::increment_counter(Node* counter_addr) {
2043 int adr_type = Compile::AliasIdxRaw;
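// The counter lives in raw VM memory (outside the Java heap), so use the
// raw alias slice; the unordered, non-atomic update is fine for statistics.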
2044 Node* ctrl = control();
2045 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
2204 *
2205 * @param n node that the type applies to
2206 * @param exact_kls type from profiling
2207 * @param maybe_null did profiling see null?
2208 *
2209 * @return node with improved type
2210 */
2211 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2212 const Type* current_type = _gvn.type(n);
2213 assert(UseTypeSpeculation, "type speculation must be on");
2214
2215 const TypePtr* speculative = current_type->speculative();
2216
2217 // Should the klass from the profile be recorded in the speculative type?
2218 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2219 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2220 const TypeOopPtr* xtype = tklass->as_instance_type();
2221 assert(xtype->klass_is_exact(), "Should be exact");
2222 // Any reason to believe n is not null (from this profiling or a previous one)?
2223 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2224 const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2225 // record the new speculative type's depth
2226 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2227 speculative = speculative->with_inline_depth(jvms()->depth());
2228 } else if (current_type->would_improve_ptr(ptr_kind)) {
2229 // Profiling reports that null was never seen, so we can change the
2230 // speculative type to a non-null ptr.
2231 if (ptr_kind == ProfileAlwaysNull) {
2232 speculative = TypePtr::NULL_PTR;
2233 } else {
2234 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2235 const TypePtr* ptr = TypePtr::NOTNULL;
2236 if (speculative != nullptr) {
2237 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2238 } else {
2239 speculative = ptr;
2240 }
2241 }
2242 }
2243
2244 if (speculative != current_type->speculative()) {
2245 // Build a type with a speculative type (what we think we know
2246 // about the type but will need a guard when we use it)
2247 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
2248 // We're changing the type, so we need a new CheckCast node to carry
2249 // the new type. The new type depends on the control: what
2250 // profiling tells us is only valid from here on, as far as we can
2251 // tell.
2252 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2253 cast = _gvn.transform(cast);
2254 replace_in_map(n, cast);
2255 n = cast;
2256 }
2257
2258 return n;
2259 }
2260
2261 /**
2262 * Record profiling data from receiver profiling at an invoke with the
2263 * type system so that it can propagate it (speculation)
2264 *
2265 * @param n receiver node
2266 *
2267 * @return node with improved type
2268 */
2269 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2270 if (!UseTypeSpeculation) {
2271 return n;
2272 }
2273 ciKlass* exact_kls = profile_has_unique_klass();
2274 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2275 if ((java_bc() == Bytecodes::_checkcast ||
2276 java_bc() == Bytecodes::_instanceof ||
2277 java_bc() == Bytecodes::_aastore) &&
2278 method()->method_data()->is_mature()) {
2279 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2280 if (data != nullptr) {
2281 if (!data->as_BitData()->null_seen()) {
2282 ptr_kind = ProfileNeverNull;
2283 } else {
2284 assert(data->is_ReceiverTypeData(), "bad profile data type");
2285 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
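// null was seen at this site. If the receiver-type table is empty,
// profiling presumably never saw a non-null value, i.e. it was always null.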
2286 uint i = 0;
2287 for (; i < call->row_limit(); i++) {
2288 ciKlass* receiver = call->receiver(i);
2289 if (receiver != nullptr) {
2290 break;
2291 }
2292 }
2293 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2294 }
2295 }
2296 }
2297 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2298 }
2299
2300 /**
2301 * Record profiling data from argument profiling at an invoke with the
2302 * type system so that it can propagate it (speculation)
2303 *
2304 * @param dest_method target method for the call
2305 * @param bc what invoke bytecode is this?
2306 */
2307 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2308 if (!UseTypeSpeculation) {
2309 return;
2310 }
2311 const TypeFunc* tf = TypeFunc::make(dest_method);
2312 int nargs = tf->domain()->cnt() - TypeFunc::Parms;
2313 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2314 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
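// j walks the signature (receiver already skipped); i counts only the
// reference arguments, which are what the argument profile records.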
2315 const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2316 if (is_reference_type(targ->basic_type())) {
2317 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2318 ciKlass* better_type = nullptr;
2319 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2320 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2321 }
2322 i++;
2323 }
2324 }
2325 }
2326
2327 /**
2328 * Record profiling data from parameter profiling at an invoke with
2329 * the type system so that it can propagate it (speculation)
2330 */
2331 void GraphKit::record_profiled_parameters_for_speculation() {
2332 if (!UseTypeSpeculation) {
2333 return;
2334 }
2335 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2349 * the type system so that it can propagate it (speculation)
2350 */
2351 void GraphKit::record_profiled_return_for_speculation() {
2352 if (!UseTypeSpeculation) {
2353 return;
2354 }
2355 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2356 ciKlass* better_type = nullptr;
2357 if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2358 // If profiling reports a single type for the return value,
2359 // feed it to the type system so it can propagate it as a
2360 // speculative type
2361 record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2362 }
2363 }
2364
2365 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2366 if (Matcher::strict_fp_requires_explicit_rounding) {
2367 // (Note: TypeFunc::make has a cache that makes this fast.)
2368 const TypeFunc* tf = TypeFunc::make(dest_method);
2369 int nargs = tf->domain()->cnt() - TypeFunc::Parms;
2370 for (int j = 0; j < nargs; j++) {
2371 const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2372 if (targ->basic_type() == T_DOUBLE) {
2373 // If any parameters are doubles, they must be rounded before
2374 // the call; dprecision_rounding does the gvn.transform for us.
2375 Node *arg = argument(j);
2376 arg = dprecision_rounding(arg);
2377 set_argument(j, arg);
2378 }
2379 }
2380 }
2381 }
2382
2383 // rounding for strict float precision conformance
2384 Node* GraphKit::precision_rounding(Node* n) {
2385 if (Matcher::strict_fp_requires_explicit_rounding) {
2386 #ifdef IA32
2387 if (UseSSE == 0) {
2388 return _gvn.transform(new RoundFloatNode(nullptr, n));
2389 }
2390 #else
2391 Unimplemented();
2500 // The first null ends the list.
2501 Node* parm0, Node* parm1,
2502 Node* parm2, Node* parm3,
2503 Node* parm4, Node* parm5,
2504 Node* parm6, Node* parm7) {
2505 assert(call_addr != nullptr, "must not call null targets");
2506
2507 // Slow-path call
2508 bool is_leaf = !(flags & RC_NO_LEAF);
2509 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2510 if (call_name == nullptr) {
2511 assert(!is_leaf, "must supply name for leaf");
2512 call_name = OptoRuntime::stub_name(call_addr);
2513 }
2514 CallNode* call;
2515 if (!is_leaf) {
2516 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2517 } else if (flags & RC_NO_FP) {
2518 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2519 } else if (flags & RC_VECTOR){
2520 uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2521 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2522 } else {
2523 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2524 }
2525
2526 // The following is similar to set_edges_for_java_call,
2527 // except that the memory effects of the call are restricted to AliasIdxRaw.
2528
2529 // Slow path call has no side-effects, uses few values
2530 bool wide_in = !(flags & RC_NARROW_MEM);
2531 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
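// With RC_NARROW_MEM the callee is assumed to touch only the given alias
// category, so feed it just that memory slice; otherwise it sees (and may
// clobber) all of memory.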
2532
2533 Node* prev_mem = nullptr;
2534 if (wide_in) {
2535 prev_mem = set_predefined_input_for_runtime_call(call);
2536 } else {
2537 assert(!wide_out, "narrow in => narrow out");
2538 Node* narrow_mem = memory(adr_type);
2539 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2540 }
2580
2581 if (has_io) {
2582 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2583 }
2584 return call;
2585
2586 }
2587
2588 // i2b
2589 Node* GraphKit::sign_extend_byte(Node* in) {
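// Shift the byte up into the sign position, then arithmetic-shift it back
// down so the sign bit is replicated through the upper 24 bits.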
2590 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2591 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2592 }
2593
2594 // i2s
2595 Node* GraphKit::sign_extend_short(Node* in) {
2596 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2597 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2598 }
2599
2600 //------------------------------merge_memory-----------------------------------
2601 // Merge memory from one path into the current memory state.
2602 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2603 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2604 Node* old_slice = mms.force_memory();
2605 Node* new_slice = mms.memory2();
2606 if (old_slice != new_slice) {
2607 PhiNode* phi;
2608 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2609 if (mms.is_empty()) {
2610 // clone base memory Phi's inputs for this memory slice
2611 assert(old_slice == mms.base_memory(), "sanity");
2612 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2613 _gvn.set_type(phi, Type::MEMORY);
2614 for (uint i = 1; i < phi->req(); i++) {
2615 phi->init_req(i, old_slice->in(i));
2616 }
2617 } else {
2618 phi = old_slice->as_Phi(); // Phi was generated already
2619 }
2882
2883 // Now do a linear scan of the secondary super-klass array. Again, no real
2884 // performance impact (too rare) but it's gotta be done.
2885 // Since the code is rarely used, there is no penalty for moving it
2886 // out of line, and it can only improve I-cache density.
2887 // The decision to inline or out-of-line this final check is platform
2888 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
2889 Node* psc = gvn.transform(
2890 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
2891
2892 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
2893 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
2894 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
2895
2896 // Return false path; set default control to true path.
2897 *ctrl = gvn.transform(r_ok_subtype);
2898 return gvn.transform(r_not_subtype);
2899 }
2900
2901 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
2902 bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
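// During parsing and early phases we emit a SubTypeCheckNode placeholder
// (below) so later phases can still optimize it; once loop opts are over,
// that is too late, so the full check must be expanded inline here.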
2903 if (expand_subtype_check) {
2904 MergeMemNode* mem = merged_memory();
2905 Node* ctrl = control();
2906 Node* subklass = obj_or_subklass;
2907 if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
2908 subklass = load_object_klass(obj_or_subklass);
2909 }
2910
2911 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
2912 set_control(ctrl);
2913 return n;
2914 }
2915
2916 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
2917 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
2918 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2919 set_control(_gvn.transform(new IfTrueNode(iff)));
2920 return _gvn.transform(new IfFalseNode(iff));
2921 }
2922
2923 // Profile-driven exact type check:
2924 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
2925 float prob,
2926 Node* *casted_receiver) {
2927 assert(!klass->is_interface(), "no exact type check on interfaces");
2928
2929 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
2930 Node* recv_klass = load_object_klass(receiver);
2931 Node* want_klass = makecon(tklass);
2932 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
2933 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
2934 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2935 set_control( _gvn.transform(new IfTrueNode (iff)));
2936 Node* fail = _gvn.transform(new IfFalseNode(iff));
2937
2938 if (!stopped()) {
2939 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
2940 const TypeOopPtr* recvx_type = tklass->as_instance_type();
2941 assert(recvx_type->klass_is_exact(), "");
2942
2943 if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
2944 // Subsume downstream occurrences of receiver with a cast to
2945 // recv_xtype, since now we know what the type will be.
2946 Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
2947 (*casted_receiver) = _gvn.transform(cast);
2948 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
2949 // (User must make the replace_in_map call.)
2950 }
2951 }
2952
2953 return fail;
2954 }
2955
2956 //------------------------------subtype_check_receiver-------------------------
2957 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
2958 Node** casted_receiver) {
2959 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
2960 Node* want_klass = makecon(tklass);
2961
2962 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
2963
2964 // Ignore interface type information until interface types are properly tracked.
2965 if (!stopped() && !klass->is_interface()) {
2966 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
2967 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
2968 if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
2969 Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
2970 (*casted_receiver) = _gvn.transform(cast);
2971 }
2972 }
2973
2974 return slow_ctl;
2975 }
2976
2977 //------------------------------seems_never_null-------------------------------
2978 // Use null_seen information if it is available from the profile.
2979 // If we see an unexpected null at a type check we record it and force a
2980 // recompile; the offending check will be recompiled to handle nulls.
2981 // If we see several offending BCIs, then all checks in the
2982 // method will be recompiled.
2983 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
2984 speculating = !_gvn.type(obj)->speculative_maybe_null();
2985 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
2986 if (UncommonNullCast // Cutout for this technique
2987 && obj != null() // And not the -Xcomp stupid case?
2988 && !too_many_traps(reason)
2989 ) {
2990 if (speculating) {
3059
3060 //------------------------maybe_cast_profiled_receiver-------------------------
3061 // If the profile has seen exactly one type, narrow to exactly that type.
3062 // Subsequent type checks will always fold up.
3063 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3064 const TypeKlassPtr* require_klass,
3065 ciKlass* spec_klass,
3066 bool safe_for_replace) {
3067 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3068
3069 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3070
3071 // Make sure we haven't already deoptimized from this tactic.
3072 if (too_many_traps_or_recompiles(reason))
3073 return nullptr;
3074
3075 // (No, this isn't a call, but it's enough like a virtual call
3076 // to use the same ciMethod accessor to get the profile info...)
3077 // If we have a speculative type use it instead of profiling (which
3078 // may not help us)
3079 ciKlass* exact_kls = spec_klass == nullptr ? profile_has_unique_klass() : spec_klass;
3080 if (exact_kls != nullptr) {// no cast failures here
3081 if (require_klass == nullptr ||
3082 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3083 // If we narrow the type to match what the type profile sees or
3084 // the speculative type, we can then remove the rest of the
3085 // cast.
3086 // This is a win, even if the exact_kls is very specific,
3087 // because downstream operations, such as method calls,
3088 // will often benefit from the sharper type.
3089 Node* exact_obj = not_null_obj; // will get updated in place...
3090 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3091 &exact_obj);
3092 { PreserveJVMState pjvms(this);
3093 set_control(slow_ctl);
3094 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3095 }
3096 if (safe_for_replace) {
3097 replace_in_map(not_null_obj, exact_obj);
3098 }
3099 return exact_obj;
3189 // If not_null_obj is dead, only null-path is taken
3190 if (stopped()) { // Doing instance-of on a null?
3191 set_control(null_ctl);
3192 return intcon(0);
3193 }
3194 region->init_req(_null_path, null_ctl);
3195 phi ->init_req(_null_path, intcon(0)); // Set null path value
3196 if (null_ctl == top()) {
3197 // Do this eagerly, so that pattern matches like is_diamond_phi
3198 // will work even during parsing.
3199 assert(_null_path == PATH_LIMIT-1, "delete last");
3200 region->del_req(_null_path);
3201 phi ->del_req(_null_path);
3202 }
3203
3204 // Do we know the type check always succeeds?
3205 bool known_statically = false;
3206 if (_gvn.type(superklass)->singleton()) {
3207 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3208 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3209 if (subk->is_loaded()) {
3210 int static_res = C->static_subtype_check(superk, subk);
3211 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3212 }
3213 }
3214
3215 if (!known_statically) {
3216 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3217 // We may not have profiling here or it may not help us. If we
3218 // have a speculative type use it to perform an exact cast.
3219 ciKlass* spec_obj_type = obj_type->speculative_type();
3220 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3221 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3222 if (stopped()) { // Profile disagrees with this path.
3223 set_control(null_ctl); // Null is the only remaining possibility.
3224 return intcon(0);
3225 }
3226 if (cast_obj != nullptr) {
3227 not_null_obj = cast_obj;
3228 }
3229 }
3245 record_for_igvn(region);
3246
3247 // If we know the type check always succeeds then we don't use the
3248 // profiling data at this bytecode. Don't lose it, feed it to the
3249 // type system as a speculative type.
3250 if (safe_for_replace) {
3251 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3252 replace_in_map(obj, casted_obj);
3253 }
3254
3255 return _gvn.transform(phi);
3256 }
3257
3258 //-------------------------------gen_checkcast---------------------------------
3259 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3260 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3261 // uncommon-trap paths work. Adjust stack after this call.
3262 // If failure_control is supplied and not null, it is filled in with
3263 // the control edge for the cast failure. Otherwise, an appropriate
3264 // uncommon trap or exception is thrown.
3265 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
3266 Node* *failure_control) {
3267 kill_dead_locals(); // Benefit all the uncommon traps
3268 const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
3269 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
3270 const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
3271
3272 // Fast cutout: Check the case that the cast is vacuously true.
3273 // This detects the common cases where the test will short-circuit
3274 // away completely. We do this before we perform the null check,
3275 // because if the test is going to turn into zero code, we don't
3276 // want a residual null check left around. (Causes a slowdown,
3277 // for example, in some objArray manipulations, such as a[i]=a[j].)
3278 if (improved_klass_ptr_type->singleton()) {
3279 const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3280 if (objtp != nullptr) {
3281 switch (C->static_subtype_check(improved_klass_ptr_type, objtp->as_klass_type())) {
3282 case Compile::SSC_always_true:
3283 // If we know the type check always succeeds then we don't use
3284 // the profiling data at this bytecode. Don't lose it, feed it
3285 // to the type system as a speculative type.
3286 return record_profiled_receiver_for_speculation(obj);
3287 case Compile::SSC_always_false:
3288 // It needs a null check because a null will *pass* the cast check.
3289 // A non-null value will always produce an exception.
3290 if (!objtp->maybe_null()) {
3291 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3292 Deoptimization::DeoptReason reason = is_aastore ?
3293 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3294 builtin_throw(reason);
3295 return top();
3296 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3297 return null_assert(obj);
3298 }
3299 break; // Fall through to full check
3300 default:
3301 break;
3302 }
3303 }
3304 }
3305
3306 ciProfileData* data = nullptr;
3307 bool safe_for_replace = false;
3308 if (failure_control == nullptr) { // use MDO in regular case only
3309 assert(java_bc() == Bytecodes::_aastore ||
3310 java_bc() == Bytecodes::_checkcast,
3311 "interpreter profiles type checks only for these BCs");
3312 data = method()->method_data()->bci_to_data(bci());
3313 safe_for_replace = true;
3314 }
3315
3316 // Make the merge point
3317 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3318 RegionNode* region = new RegionNode(PATH_LIMIT);
3319 Node* phi = new PhiNode(region, toop);
3320 C->set_has_split_ifs(true); // Has chance for split-if optimization
3321
3322 // Use null-cast information if it is available
3323 bool speculative_not_null = false;
3324 bool never_see_null = ((failure_control == nullptr) // regular case only
3325 && seems_never_null(obj, data, speculative_not_null));
3326
3327 // Null check; get casted pointer; set region slot 3
3328 Node* null_ctl = top();
3329 Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3330
3331 // If not_null_obj is dead, only null-path is taken
3332 if (stopped()) { // Doing a checkcast on a null?
3333 set_control(null_ctl);
3334 return null();
3335 }
3336 region->init_req(_null_path, null_ctl);
3337 phi ->init_req(_null_path, null()); // Set null path value
3338 if (null_ctl == top()) {
3339 // Do this eagerly, so that pattern matches like is_diamond_phi
3340 // will work even during parsing.
3341 assert(_null_path == PATH_LIMIT-1, "delete last");
3342 region->del_req(_null_path);
3343 phi ->del_req(_null_path);
3344 }
3345
3346 Node* cast_obj = nullptr;
3347 if (improved_klass_ptr_type->klass_is_exact()) {
3348 // The following optimization tries to statically cast the speculative type of the object
3349 // (for example obtained during profiling) to the type of the superklass and then do a
3350 // dynamic check that the type of the object is what we expect. To work correctly
3351 // for checkcast and aastore the type of superklass should be exact.
3352 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3353 // We may not have profiling here or it may not help us. If we have
3354 // a speculative type use it to perform an exact cast.
3355 ciKlass* spec_obj_type = obj_type->speculative_type();
3356 if (spec_obj_type != nullptr || data != nullptr) {
3357 cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace);
3358 if (cast_obj != nullptr) {
3359 if (failure_control != nullptr) // failure is now impossible
3360 (*failure_control) = top();
3361 // adjust the type of the phi to the exact klass:
3362 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3363 }
3364 }
3365 }
3366
3367 if (cast_obj == nullptr) {
3368 // Generate the subtype check
3369 Node* improved_superklass = superklass;
3370 if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
3371 improved_superklass = makecon(improved_klass_ptr_type);
3372 }
3373 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);
3374
3375 // Plug in success path into the merge
3376 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3377 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3378 if (failure_control == nullptr) {
3379 if (not_subtype_ctrl != top()) { // If failure is possible
3380 PreserveJVMState pjvms(this);
3381 set_control(not_subtype_ctrl);
3382 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3383 Deoptimization::DeoptReason reason = is_aastore ?
3384 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3385 builtin_throw(reason);
3386 }
3387 } else {
3388 (*failure_control) = not_subtype_ctrl;
3389 }
3390 }
3391
3392 region->init_req(_obj_path, control());
3393 phi ->init_req(_obj_path, cast_obj);
3394
3395 // A merge of null or Casted-NotNull obj
3396 Node* res = _gvn.transform(phi);
3397
3398 // Note I do NOT always 'replace_in_map(obj,result)' here.
3399 // if( tk->klass()->can_be_primary_super() )
3400 // This means that if I successfully store an Object into an array-of-String
3401 // I 'forget' that the Object is really now known to be a String. I have to
3402 // do this because we don't have true union types for interfaces - if I store
3403 // a Baz into an array-of-Interface and then tell the optimizer it's an
3404 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3405 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3406 // replace_in_map( obj, res );
3407
3408 // Return final merged results
3409 set_control( _gvn.transform(region) );
3410 record_for_igvn(region);
3411
3412 return record_profiled_receiver_for_speculation(res);
3413 }
3414
3415 //------------------------------next_monitor-----------------------------------
3416 // What number should be given to the next monitor?
3417 int GraphKit::next_monitor() {
3418 int current = jvms()->monitor_depth()* C->sync_stack_slots();
3419 int next = current + C->sync_stack_slots();
3420 // Keep the toplevel high water mark current:
3421 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3422 return current;
3423 }
3424
3425 //------------------------------insert_mem_bar---------------------------------
3426 // Memory barrier to avoid floating things around.
3427 // The membar serves as a pinch point for both control and all memory slices.
3428 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3429 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3430 mb->init_req(TypeFunc::Control, control());
3431 mb->init_req(TypeFunc::Memory, reset_memory());
3432 Node* membar = _gvn.transform(mb);
3460 }
3461 Node* membar = _gvn.transform(mb);
3462 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3463 if (alias_idx == Compile::AliasIdxBot) {
3464 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3465 } else {
3466 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3467 }
3468 return membar;
3469 }
3470
3471 //------------------------------shared_lock------------------------------------
3472 // Emit locking code.
3473 FastLockNode* GraphKit::shared_lock(Node* obj) {
3474 // bci is either a monitorenter bc or InvocationEntryBci
3475 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3476 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3477
3478 if( !GenerateSynchronizationCode )
3479 return nullptr; // Not locking things?
3480 if (stopped()) // Dead monitor?
3481 return nullptr;
3482
3483 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3484
3485 // Box the stack location
3486 Node* box = new BoxLockNode(next_monitor());
3487 // Check for bailout after new BoxLockNode
3488 if (failing()) { return nullptr; }
3489 box = _gvn.transform(box);
3490 Node* mem = reset_memory();
3491
3492 FastLockNode * flock = _gvn.transform(new FastLockNode(nullptr, obj, box) )->as_FastLock();
3493
3494 // Add monitor to debug info for the slow path. If we block inside the
3495 // slow path and de-opt, we need the monitor hanging around
3496 map()->push_monitor( flock );
3497
3498 const TypeFunc *tf = LockNode::lock_type();
3499 LockNode *lock = new LockNode(C, tf);
3528 }
3529 #endif
3530
3531 return flock;
3532 }
3533
3534
3535 //------------------------------shared_unlock----------------------------------
3536 // Emit unlocking code.
3537 void GraphKit::shared_unlock(Node* box, Node* obj) {
3538   // bci is either a monitorexit bc or InvocationEntryBci
3539 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3540 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3541
3542 if( !GenerateSynchronizationCode )
3543 return;
3544 if (stopped()) { // Dead monitor?
3545 map()->pop_monitor(); // Kill monitor from debug info
3546 return;
3547 }
3548
3549 // Memory barrier to avoid floating things down past the locked region
3550 insert_mem_bar(Op_MemBarReleaseLock);
3551
3552 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3553 UnlockNode *unlock = new UnlockNode(C, tf);
3554 #ifdef ASSERT
3555 unlock->set_dbg_jvms(sync_jvms());
3556 #endif
3557 uint raw_idx = Compile::AliasIdxRaw;
3558 unlock->init_req( TypeFunc::Control, control() );
3559 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3560 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3561 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3562 unlock->init_req( TypeFunc::ReturnAdr, top() );
3563
3564 unlock->init_req(TypeFunc::Parms + 0, obj);
3565 unlock->init_req(TypeFunc::Parms + 1, box);
3566 unlock = _gvn.transform(unlock)->as_Unlock();
3567
3568 Node* mem = reset_memory();
3569
3570 // unlock has no side-effects, sets few values
3571 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3572
3573 // Kill monitor from debug info
3574 map()->pop_monitor( );
3575 }
3576
3577 //-------------------------------get_layout_helper-----------------------------
3578 // If the given klass is a constant or known to be an array,
3579 // fetch the constant layout helper value into constant_value
3580 // and return null. Otherwise, load the non-constant
3581 // layout helper value, and return the node which represents it.
3582 // This two-faced routine is useful because allocation sites
3583 // almost always feature constant types.
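// Sketch of the calling pattern (as used by new_instance() and new_array() below):
//   jint lh = Klass::_lh_neutral_value;
//   Node* lh_node = get_layout_helper(klass_node, lh);
//   if (lh_node == nullptr) { /* constant case: 'lh' holds the layout helper */ }
//   else                    { /* non-constant: 'lh_node' must be tested at runtime */ }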
3584 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3585 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3586 if (!StressReflectiveCode && klass_t != nullptr) {
3587 bool xklass = klass_t->klass_is_exact();
3588 if (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM)) {
3589 jint lhelper;
3590 if (klass_t->isa_aryklassptr()) {
3591 BasicType elem = klass_t->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
3592 if (is_reference_type(elem, true)) {
3593 elem = T_OBJECT;
3594 }
3595 lhelper = Klass::array_layout_helper(elem);
3596 } else {
3597 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
3598 }
3599 if (lhelper != Klass::_lh_neutral_value) {
3600 constant_value = lhelper;
3601 return (Node*) nullptr;
3602 }
3603 }
3604 }
3605 constant_value = Klass::_lh_neutral_value; // put in a known value
3606 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3607 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3608 }
3609
3610 // We just put in an allocate/initialize with a big raw-memory effect.
3611 // Hook selected additional alias categories on the initialization.
3612 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3613 MergeMemNode* init_in_merge,
3614 Node* init_out_raw) {
3615 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3616 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3617
3618 Node* prevmem = kit.memory(alias_idx);
3619 init_in_merge->set_memory_at(alias_idx, prevmem);
3620 kit.set_memory(init_out_raw, alias_idx);
3621 }
3622
3623 //---------------------------set_output_for_allocation-------------------------
3624 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3625 const TypeOopPtr* oop_type,
3626 bool deoptimize_on_exception) {
3627 int rawidx = Compile::AliasIdxRaw;
3628 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3629 add_safepoint_edges(alloc);
3630 Node* allocx = _gvn.transform(alloc);
3631 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3632 // create memory projection for i_o
3633 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3634 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3635
3636 // create a memory projection as for the normal control path
3637 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3638 set_memory(malloc, rawidx);
3639
3640   // a normal slow-call doesn't change i_o, but an allocation does, so
3641   // we create a separate i_o projection for the normal control path
3642 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3643 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3644
3645 // put in an initialization barrier
3646 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3647 rawoop)->as_Initialize();
3648 assert(alloc->initialization() == init, "2-way macro link must work");
3649 assert(init ->allocation() == alloc, "2-way macro link must work");
3650 {
3651 // Extract memory strands which may participate in the new object's
3652 // initialization, and source them from the new InitializeNode.
3653 // This will allow us to observe initializations when they occur,
3654 // and link them properly (as a group) to the InitializeNode.
3655 assert(init->in(InitializeNode::Memory) == malloc, "");
3656 MergeMemNode* minit_in = MergeMemNode::make(malloc);
3657 init->set_req(InitializeNode::Memory, minit_in);
3658 record_for_igvn(minit_in); // fold it up later, if possible
3659 Node* minit_out = memory(rawidx);
3660 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3661 // Add an edge in the MergeMem for the header fields so an access
3662 // to one of those has correct memory state
3663 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3664 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3665 if (oop_type->isa_aryptr()) {
3666 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3667 int elemidx = C->get_alias_index(telemref);
3668 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3669 } else if (oop_type->isa_instptr()) {
3670 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
3671 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3672 ciField* field = ik->nonstatic_field_at(i);
3673 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
3674 continue; // do not bother to track really large numbers of fields
3675 // Find (or create) the alias category for this field:
3676 int fieldidx = C->alias_type(field)->index();
3677 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3678 }
3679 }
3680 }
3681
3682 // Cast raw oop to the real thing...
3683 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3684 javaoop = _gvn.transform(javaoop);
3685 C->set_recent_alloc(control(), javaoop);
3686 assert(just_allocated_object(control()) == javaoop, "just allocated");
3687
3688 #ifdef ASSERT
3689 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
3700 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3701 }
3702 }
3703 #endif //ASSERT
3704
3705 return javaoop;
3706 }
3707
3708 //---------------------------new_instance--------------------------------------
3709 // This routine takes a klass_node which may be constant (for a static type)
3710 // or may be non-constant (for reflective code). It will work equally well
3711 // for either, and the graph will fold nicely if the optimizer later reduces
3712 // the type to a constant.
3713 // The optional arguments are for specialized use by intrinsics:
3714 // - If 'extra_slow_test' is non-null, it is an extra condition for the slow path.
3715 // - If 'return_size_val' is non-null, report the total object size to the caller.
3716 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3717 Node* GraphKit::new_instance(Node* klass_node,
3718 Node* extra_slow_test,
3719 Node* *return_size_val,
3720 bool deoptimize_on_exception) {
3721 // Compute size in doublewords
3722 // The size is always an integral number of doublewords, represented
3723 // as a positive bytewise size stored in the klass's layout_helper.
3724 // The layout_helper also encodes (in a low bit) the need for a slow path.
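  // (For a typical instance klass the layout_helper is simply the instance size in bytes;
  //  the low bit, Klass::_lh_instance_slow_path_bit, is set when allocation must go to the
  //  VM, e.g. for a class with a finalizer.)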
3725 jint layout_con = Klass::_lh_neutral_value;
3726 Node* layout_val = get_layout_helper(klass_node, layout_con);
3727 int layout_is_con = (layout_val == nullptr);
3728
3729 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
3730 // Generate the initial go-slow test. It's either ALWAYS (return a
3731 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
3732 // case) a computed value derived from the layout_helper.
3733 Node* initial_slow_test = nullptr;
3734 if (layout_is_con) {
3735 assert(!StressReflectiveCode, "stress mode does not use these paths");
3736 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3737 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3738 } else { // reflective case
3739 // This reflective path is used by Unsafe.allocateInstance.
3740 // (It may be stress-tested by specifying StressReflectiveCode.)
3741     // Basically, we want to get into the VM if there's an illegal argument.
3742 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3743 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3744 if (extra_slow_test != intcon(0)) {
3745 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3746 }
3747 // (Macro-expander will further convert this to a Bool, if necessary.)
3758
3759 // Clear the low bits to extract layout_helper_size_in_bytes:
3760 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3761 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3762 size = _gvn.transform( new AndXNode(size, mask) );
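       // E.g. on 64-bit, right_n_bits(LogBytesPerLong) == 7, so the mask is ~7: the and
       // rounds the size down to a multiple of 8 and, per the assert above, also clears
       // the _lh_instance_slow_path_bit still present in the layout helper value.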
3763 }
3764 if (return_size_val != nullptr) {
3765 (*return_size_val) = size;
3766 }
3767
3768 // This is a precise notnull oop of the klass.
3769 // (Actually, it need not be precise if this is a reflective allocation.)
3770 // It's what we cast the result to.
3771 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3772 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
3773 const TypeOopPtr* oop_type = tklass->as_instance_type();
3774
3775 // Now generate allocation code
3776
3777   // The entire memory state is needed for the slow path of the allocation
3778   // since GC and deoptimization can happen.
3779 Node *mem = reset_memory();
3780 set_all_memory(mem); // Create new memory state
3781
3782 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3783 control(), mem, i_o(),
3784 size, klass_node,
3785 initial_slow_test);
3786
3787 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3788 }
3789
3790 //-------------------------------new_array-------------------------------------
3791 // helper for both newarray and anewarray
3792 // The 'length' parameter is (obviously) the length of the array.
3793 // The optional arguments are for specialized use by intrinsics:
3794 // - If 'return_size_val' is non-null, report the non-padded array size (sum of header size
3795 // and array body) to the caller.
3796 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3797 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3798 Node* length, // number of array elements
3799 int nargs, // number of arguments to push back for uncommon trap
3800 Node* *return_size_val,
3801 bool deoptimize_on_exception) {
3802 jint layout_con = Klass::_lh_neutral_value;
3803 Node* layout_val = get_layout_helper(klass_node, layout_con);
3804 int layout_is_con = (layout_val == nullptr);
3805
3806 if (!layout_is_con && !StressReflectiveCode &&
3807 !too_many_traps(Deoptimization::Reason_class_check)) {
3808 // This is a reflective array creation site.
3809 // Optimistically assume that it is a subtype of Object[],
3810 // so that we can fold up all the address arithmetic.
3811 layout_con = Klass::array_layout_helper(T_OBJECT);
3812 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3813 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3814 { BuildCutout unless(this, bol_lh, PROB_MAX);
3815 inc_sp(nargs);
3816 uncommon_trap(Deoptimization::Reason_class_check,
3817 Deoptimization::Action_maybe_recompile);
3818 }
3819 layout_val = nullptr;
3820 layout_is_con = true;
3821 }
3822
3823 // Generate the initial go-slow test. Make sure we do not overflow
3824 // if length is huge (near 2Gig) or negative! We do not need
3825 // exact double-words here, just a close approximation of needed
3826 // double-words. We can't add any offset or rounding bits, lest we
3827   // take a size of -1 bytes and make it positive. Use an unsigned
3828 // compare, so negative sizes look hugely positive.
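  // E.g. a length of -1 is treated as 0xFFFFFFFF by the unsigned compare below, which
  // exceeds any fast_size_limit, so negative lengths always take the slow path (where
  // they are rejected).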
3829 int fast_size_limit = FastAllocateSizeLimit;
3830 if (layout_is_con) {
3831 assert(!StressReflectiveCode, "stress mode does not use these paths");
3832 // Increase the size limit if we have exact knowledge of array type.
3833 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3834 fast_size_limit <<= (LogBytesPerLong - log2_esize);
3835 }
3836
3837 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3838 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3839
3840 // --- Size Computation ---
3841 // array_size = round_to_heap(array_header + (length << elem_shift));
3842 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
3843 // and align_to(x, y) == ((x + y-1) & ~(y-1))
3844 // The rounding mask is strength-reduced, if possible.
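  // Worked example (assuming an int[] with a 16-byte header on a 64-bit VM):
  //   length == 10, elem_shift == 2  =>  array_size == round_to_heap(16 + (10 << 2))
  //                                                 == align_to(56, 8) == 56 bytes.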
3845 int round_mask = MinObjAlignmentInBytes - 1;
3846 Node* header_size = nullptr;
3847 // (T_BYTE has the weakest alignment and size restrictions...)
3848 if (layout_is_con) {
3849 int hsize = Klass::layout_helper_header_size(layout_con);
3850 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3851 if ((round_mask & ~right_n_bits(eshift)) == 0)
3852 round_mask = 0; // strength-reduce it if it goes away completely
3853 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3854 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3855 assert(header_size_min <= hsize, "generic minimum is smallest");
3856 header_size = intcon(hsize);
3857 } else {
3858 Node* hss = intcon(Klass::_lh_header_size_shift);
3859 Node* hsm = intcon(Klass::_lh_header_size_mask);
3860 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
3861 header_size = _gvn.transform(new AndINode(header_size, hsm));
3862 }
3863
3864 Node* elem_shift = nullptr;
3865 if (layout_is_con) {
3866 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3867 if (eshift != 0)
3868 elem_shift = intcon(eshift);
3869 } else {
3870 // There is no need to mask or shift this value.
3871 // The semantics of LShiftINode include an implicit mask to 0x1F.
3872 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3873 elem_shift = layout_val;
3920 }
3921 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
3922
3923 if (return_size_val != nullptr) {
3924 // This is the size
3925 (*return_size_val) = non_rounded_size;
3926 }
3927
3928 Node* size = non_rounded_size;
3929 if (round_mask != 0) {
3930 Node* mask1 = MakeConX(round_mask);
3931 size = _gvn.transform(new AddXNode(size, mask1));
3932 Node* mask2 = MakeConX(~round_mask);
3933 size = _gvn.transform(new AndXNode(size, mask2));
3934 }
3935 // else if round_mask == 0, the size computation is self-rounding
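  // (Add-then-mask above is the usual round-up-to-alignment idiom: with round_mask == 7,
  //  a non_rounded_size of 52 becomes (52 + 7) & ~7 == 56, while 56 stays 56.)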
3936
3937 // Now generate allocation code
3938
3939   // The entire memory state is needed for the slow path of the allocation
3940   // since GC and deoptimization can happen.
3941 Node *mem = reset_memory();
3942 set_all_memory(mem); // Create new memory state
3943
3944 if (initial_slow_test->is_Bool()) {
3945 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3946 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3947 }
3948
3949 const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3950 Node* valid_length_test = _gvn.intcon(1);
3951 if (ary_type->isa_aryptr()) {
3952 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
3953 jint max = TypeAryPtr::max_array_length(bt);
3954 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
3955 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
3956 }
3957
3958 // Create the AllocateArrayNode and its result projections
3959 AllocateArrayNode* alloc
3960 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3961 control(), mem, i_o(),
3962 size, klass_node,
3963 initial_slow_test,
3964 length, valid_length_test);
3965
3966 // Cast to correct type. Note that the klass_node may be constant or not,
3967 // and in the latter case the actual array type will be inexact also.
3968 // (This happens via a non-constant argument to inline_native_newArray.)
3969 // In any case, the value of klass_node provides the desired array type.
3970 const TypeInt* length_type = _gvn.find_int_type(length);
3971 if (ary_type->isa_aryptr() && length_type != nullptr) {
3972 // Try to get a better type than POS for the size
3973 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3974 }
3975
3976 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
3977
3978 array_ideal_length(alloc, ary_type, true);
3979 return javaoop;
3980 }
3981
3982 // The following "Ideal_foo" functions are placed here because they recognize
3983 // the graph shapes created by the functions immediately above.
3984
3985 //---------------------------Ideal_allocation----------------------------------
4092 set_all_memory(ideal.merged_memory());
4093 set_i_o(ideal.i_o());
4094 set_control(ideal.ctrl());
4095 }
4096
4097 void GraphKit::final_sync(IdealKit& ideal) {
4098 // Final sync IdealKit and graphKit.
4099 sync_kit(ideal);
4100 }
4101
4102 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4103 Node* len = load_array_length(load_String_value(str, set_ctrl));
4104 Node* coder = load_String_coder(str, set_ctrl);
4105 // Divide length by 2 if coder is UTF16
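  // (String.coder is 0 for LATIN1 and 1 for UTF16, so shifting the byte-array length
  //  right by 'coder' yields the character count for either encoding.)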
4106 return _gvn.transform(new RShiftINode(len, coder));
4107 }
4108
4109 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4110 int value_offset = java_lang_String::value_offset();
4111 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4112 false, nullptr, 0);
4113 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4114 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4115 TypeAry::make(TypeInt::BYTE, TypeInt::POS),
4116 ciTypeArrayKlass::make(T_BYTE), true, 0);
4117 Node* p = basic_plus_adr(str, str, value_offset);
4118 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4119 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4120 return load;
4121 }
4122
4123 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4124 if (!CompactStrings) {
4125 return intcon(java_lang_String::CODER_UTF16);
4126 }
4127 int coder_offset = java_lang_String::coder_offset();
4128 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4129 false, nullptr, 0);
4130 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4131
4132 Node* p = basic_plus_adr(str, str, coder_offset);
4133 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4134 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4135 return load;
4136 }
4137
4138 void GraphKit::store_String_value(Node* str, Node* value) {
4139 int value_offset = java_lang_String::value_offset();
4140 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4141 false, nullptr, 0);
4142 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4143
4144 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4145 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4146 }
4147
4148 void GraphKit::store_String_coder(Node* str, Node* value) {
4149 int coder_offset = java_lang_String::coder_offset();
4150 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4151 false, nullptr, 0);
4152 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4153
4154 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4155 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4156 }
4157
4158 // Capture src and dst memory state with a MergeMemNode
4159 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4160 if (src_type == dst_type) {
4161 // Types are equal, we don't need a MergeMemNode
4162 return memory(src_type);
4163 }
4164 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4165 record_for_igvn(merge); // fold it up later, if possible
4166 int src_idx = C->get_alias_index(src_type);
4167 int dst_idx = C->get_alias_index(dst_type);
4168 merge->set_memory_at(src_idx, memory(src_idx));
4169 merge->set_memory_at(dst_idx, memory(dst_idx));
4170 return merge;
4171 }
4244 i_char->init_req(2, AddI(i_char, intcon(2)));
4245
4246 set_control(IfFalse(iff));
4247 set_memory(st, TypeAryPtr::BYTES);
4248 }
4249
4250 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4251 if (!field->is_constant()) {
4252 return nullptr; // Field not marked as constant.
4253 }
4254 ciInstance* holder = nullptr;
4255 if (!field->is_static()) {
4256 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4257 if (const_oop != nullptr && const_oop->is_instance()) {
4258 holder = const_oop->as_instance();
4259 }
4260 }
4261 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4262 /*is_unsigned_load=*/false);
4263 if (con_type != nullptr) {
4264 return makecon(con_type);
4265 }
4266 return nullptr;
4267 }
4268
4269 Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
4270 const TypeOopPtr* obj_type = obj->bottom_type()->isa_oopptr();
4271 const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
4272 if (obj_type != nullptr && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
4273 const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
4274 Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
4275 return casted_obj;
4276 }
4277 return obj;
4278 }
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/ciFlatArrayKlass.hpp"
27 #include "ci/ciInlineKlass.hpp"
28 #include "ci/ciUtilities.hpp"
29 #include "classfile/javaClasses.hpp"
30 #include "ci/ciObjArray.hpp"
31 #include "asm/register.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/c2/barrierSetC2.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "opto/addnode.hpp"
38 #include "opto/castnode.hpp"
39 #include "opto/convertnode.hpp"
40 #include "opto/graphKit.hpp"
41 #include "opto/idealKit.hpp"
42 #include "opto/inlinetypenode.hpp"
43 #include "opto/intrinsicnode.hpp"
44 #include "opto/locknode.hpp"
45 #include "opto/machnode.hpp"
46 #include "opto/narrowptrnode.hpp"
47 #include "opto/opaquenode.hpp"
48 #include "opto/parse.hpp"
49 #include "opto/rootnode.hpp"
50 #include "opto/runtime.hpp"
51 #include "opto/subtypenode.hpp"
52 #include "runtime/deoptimization.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "utilities/bitMap.inline.hpp"
55 #include "utilities/powerOfTwo.hpp"
56 #include "utilities/growableArray.hpp"
57
58 //----------------------------GraphKit-----------------------------------------
59 // Main utility constructor.
60 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
61 : Phase(Phase::Parser),
62 _env(C->env()),
63 _gvn((gvn != nullptr) ? *gvn : *C->initial_gvn()),
64 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
65 {
66 assert(gvn == nullptr || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
67 _exceptions = jvms->map()->next_exception();
68 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
69 set_jvms(jvms);
70 #ifdef ASSERT
71 if (_gvn.is_IterGVN() != nullptr) {
72 assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
73 // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
74 _worklist_size = _gvn.C->igvn_worklist()->size();
75 }
76 #endif
77 }
78
79 // Private constructor for parser.
80 GraphKit::GraphKit()
81 : Phase(Phase::Parser),
82 _env(C->env()),
83 _gvn(*C->initial_gvn()),
84 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
85 {
86 _exceptions = nullptr;
87 set_map(nullptr);
88 debug_only(_sp = -99);
89 debug_only(set_bci(-99));
90 }
91
92
93
94 //---------------------------clean_stack---------------------------------------
95 // Clear away rubbish from the stack area of the JVM state.
96 // This destroys any arguments that may be waiting on the stack.
852 if (PrintMiscellaneous && (Verbose || WizardMode)) {
853 tty->print_cr("Zombie local %d: ", local);
854 jvms->dump();
855 }
856 return false;
857 }
858 }
859 }
860 return true;
861 }
862
863 #endif //ASSERT
864
865 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
866 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
867 ciMethod* cur_method = jvms->method();
868 int cur_bci = jvms->bci();
869 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
870 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
871 return Interpreter::bytecode_should_reexecute(code) ||
872 (is_anewarray && (code == Bytecodes::_multianewarray));
 873     // Reexecute the _multianewarray bytecode which was replaced with a
 874     // sequence of [a]newarray. See Parse::do_multianewarray().
 875     //
 876     // Note: the interpreter should not have it set since this optimization
 877     // is limited by dimensions and guarded by a flag, so in some cases
 878     // multianewarray() runtime calls will be generated and
 879     // the bytecode should not be reexecuted (stack will not be reset).
880 } else {
881 return false;
882 }
883 }
884
885 // Helper function for adding JVMState and debug information to node
886 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
887 // Add the safepoint edges to the call (or other safepoint).
888
889 // Make sure dead locals are set to top. This
890 // should help register allocation time and cut down on the size
891 // of the deoptimization information.
892 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
943 }
944
945 // Presize the call:
946 DEBUG_ONLY(uint non_debug_edges = call->req());
947 call->add_req_batch(top(), youngest_jvms->debug_depth());
948 assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
949
950 // Set up edges so that the call looks like this:
951 // Call [state:] ctl io mem fptr retadr
952 // [parms:] parm0 ... parmN
953 // [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
954 // [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
955 // [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
956 // Note that caller debug info precedes callee debug info.
957
958 // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
959 uint debug_ptr = call->req();
960
961 // Loop over the map input edges associated with jvms, add them
962 // to the call node, & reset all offsets to match call node array.
963
964 JVMState* callee_jvms = nullptr;
965 for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
966 uint debug_end = debug_ptr;
967 uint debug_start = debug_ptr - in_jvms->debug_size();
968 debug_ptr = debug_start; // back up the ptr
969
970 uint p = debug_start; // walks forward in [debug_start, debug_end)
971 uint j, k, l;
972 SafePointNode* in_map = in_jvms->map();
973 out_jvms->set_map(call);
974
975 if (can_prune_locals) {
976 assert(in_jvms->method() == out_jvms->method(), "sanity");
977 // If the current throw can reach an exception handler in this JVMS,
978 // then we must keep everything live that can reach that handler.
979 // As a quick and dirty approximation, we look for any handlers at all.
980 if (in_jvms->method()->has_exception_handlers()) {
981 can_prune_locals = false;
982 }
983 }
984
985 // Add the Locals
986 k = in_jvms->locoff();
987 l = in_jvms->loc_size();
988 out_jvms->set_locoff(p);
989 if (!can_prune_locals) {
990 for (j = 0; j < l; j++) {
991 Node* val = in_map->in(k + j);
992 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
993 if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
994 callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
995 val->bottom_type()->is_inlinetypeptr()) {
996 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
997 }
998 call->set_req(p++, val);
999 }
1000 } else {
1001 p += l; // already set to top above by add_req_batch
1002 }
1003
1004 // Add the Expression Stack
1005 k = in_jvms->stkoff();
1006 l = in_jvms->sp();
1007 out_jvms->set_stkoff(p);
1008 if (!can_prune_locals) {
1009 for (j = 0; j < l; j++) {
1010 Node* val = in_map->in(k + j);
1011 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
1012 if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
1013 callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
1014 val->bottom_type()->is_inlinetypeptr()) {
1015 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
1016 }
1017 call->set_req(p++, val);
1018 }
1019 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
1020 // Divide stack into {S0,...,S1}, where S0 is set to top.
1021 uint s1 = stack_slots_not_pruned;
1022 stack_slots_not_pruned = 0; // for next iteration
1023 if (s1 > l) s1 = l;
1024 uint s0 = l - s1;
1025 p += s0; // skip the tops preinstalled by add_req_batch
1026 for (j = s0; j < l; j++)
1027 call->set_req(p++, in_map->in(k+j));
1028 } else {
1029 p += l; // already set to top above by add_req_batch
1030 }
1031
1032 // Add the Monitors
1033 k = in_jvms->monoff();
1034 l = in_jvms->mon_size();
1035 out_jvms->set_monoff(p);
1036 for (j = 0; j < l; j++)
1037 call->set_req(p++, in_map->in(k+j));
1038
1039 // Copy any scalar object fields.
1040 k = in_jvms->scloff();
1041 l = in_jvms->scl_size();
1042 out_jvms->set_scloff(p);
1043 for (j = 0; j < l; j++)
1044 call->set_req(p++, in_map->in(k+j));
1045
1046 // Finish the new jvms.
1047 out_jvms->set_endoff(p);
1048
1049 assert(out_jvms->endoff() == debug_end, "fill ptr must match");
1050 assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
1051 assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
1052 assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
1053 assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
1054 assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
1055
1056 // Update the two tail pointers in parallel.
1057 callee_jvms = out_jvms;
1058 out_jvms = out_jvms->caller();
1059 in_jvms = in_jvms->caller();
1060 }
1061
1062 assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1063
1064 // Test the correctness of JVMState::debug_xxx accessors:
1065 assert(call->jvms()->debug_start() == non_debug_edges, "");
1066 assert(call->jvms()->debug_end() == call->req(), "");
1067 assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1068 }
1069
1070 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
1071 Bytecodes::Code code = java_bc();
1072 if (code == Bytecodes::_wide) {
1073 code = method()->java_code_at_bci(bci() + 1);
1074 }
1075
1076 if (code != Bytecodes::_illegal) {
1077 depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1213 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1214 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1215 return _gvn.transform( new AndLNode(conv, mask) );
1216 }
1217
1218 Node* GraphKit::ConvL2I(Node* offset) {
1219 // short-circuit a common case
1220 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1221 if (offset_con != (jlong)Type::OffsetBot) {
1222 return intcon((int) offset_con);
1223 }
1224 return _gvn.transform( new ConvL2INode(offset));
1225 }
1226
1227 //-------------------------load_object_klass-----------------------------------
1228 Node* GraphKit::load_object_klass(Node* obj) {
1229 // Special-case a fresh allocation to avoid building nodes:
1230 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1231 if (akls != nullptr) return akls;
1232 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1233 return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
1234 }
1235
1236 //-------------------------load_array_length-----------------------------------
1237 Node* GraphKit::load_array_length(Node* array) {
1238 // Special-case a fresh allocation to avoid building nodes:
1239 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1240 Node *alen;
1241 if (alloc == nullptr) {
1242 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1243 alen = _gvn.transform( new LoadRangeNode(nullptr, immutable_memory(), r_adr, TypeInt::POS));
1244 } else {
1245 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1246 }
1247 return alen;
1248 }
1249
1250 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1251 const TypeOopPtr* oop_type,
1252 bool replace_length_in_map) {
1253 Node* length = alloc->Ideal_length();
1262 replace_in_map(length, ccast);
1263 }
1264 return ccast;
1265 }
1266 }
1267 return length;
1268 }
1269
1270 //------------------------------do_null_check----------------------------------
1271 // Helper function to do a null pointer check. Returned value is
1272 // the incoming address with null casted away. You are allowed to use the
1273 // not-null value only if you are control dependent on the test.
1274 #ifndef PRODUCT
1275 extern uint explicit_null_checks_inserted,
1276 explicit_null_checks_elided;
1277 #endif
1278 Node* GraphKit::null_check_common(Node* value, BasicType type,
1279 // optional arguments for variations:
1280 bool assert_null,
1281 Node* *null_control,
1282 bool speculative,
1283 bool is_init_check) {
1284 assert(!assert_null || null_control == nullptr, "not both at once");
1285 if (stopped()) return top();
1286 NOT_PRODUCT(explicit_null_checks_inserted++);
1287
1288 if (value->is_InlineType()) {
1289 // Null checking a scalarized but nullable inline type. Check the IsInit
1290 // input instead of the oop input to avoid keeping buffer allocations alive.
1291 InlineTypeNode* vtptr = value->as_InlineType();
1292 while (vtptr->get_oop()->is_InlineType()) {
1293 vtptr = vtptr->get_oop()->as_InlineType();
1294 }
1295 null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1296 if (stopped()) {
1297 return top();
1298 }
1299 if (assert_null) {
1300 // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1301 // vtptr = InlineTypeNode::make_null(_gvn, vtptr->type()->inline_klass());
1302 // replace_in_map(value, vtptr);
1303 // return vtptr;
1304 replace_in_map(value, null());
1305 return null();
1306 }
1307 bool do_replace_in_map = (null_control == nullptr || (*null_control) == top());
1308 return cast_not_null(value, do_replace_in_map);
1309 }
1310
1311 // Construct null check
1312 Node *chk = nullptr;
1313 switch(type) {
1314 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1315 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1316 case T_ARRAY : // fall through
1317 type = T_OBJECT; // simplify further tests
1318 case T_OBJECT : {
1319 const Type *t = _gvn.type( value );
1320
1321 const TypeOopPtr* tp = t->isa_oopptr();
1322 if (tp != nullptr && !tp->is_loaded()
1323 // Only for do_null_check, not any of its siblings:
1324 && !assert_null && null_control == nullptr) {
1325 // Usually, any field access or invocation on an unloaded oop type
1326 // will simply fail to link, since the statically linked class is
1327 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1328 // the static class is loaded but the sharper oop type is not.
1329 // Rather than checking for this obscure case in lots of places,
1330 // we simply observe that a null check on an unloaded class
1394 }
1395 Node *oldcontrol = control();
1396 set_control(cfg);
1397 Node *res = cast_not_null(value);
1398 set_control(oldcontrol);
1399 NOT_PRODUCT(explicit_null_checks_elided++);
1400 return res;
1401 }
1402 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1403 if (cfg == nullptr) break; // Quit at region nodes
1404 depth++;
1405 }
1406 }
1407
1408 //-----------
1409 // Branch to failure if null
1410 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1411 Deoptimization::DeoptReason reason;
1412 if (assert_null) {
1413 reason = Deoptimization::reason_null_assert(speculative);
1414 } else if (type == T_OBJECT || is_init_check) {
1415 reason = Deoptimization::reason_null_check(speculative);
1416 } else {
1417 reason = Deoptimization::Reason_div0_check;
1418 }
1419 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1420 // ciMethodData::has_trap_at will return a conservative -1 if any
1421 // must-be-null assertion has failed. This could cause performance
1422 // problems for a method after its first do_null_assert failure.
1423 // Consider using 'Reason_class_check' instead?
1424
1425 // To cause an implicit null check, we set the not-null probability
1426 // to the maximum (PROB_MAX). For an explicit check the probability
1427 // is set to a smaller value.
1428 if (null_control != nullptr || too_many_traps(reason)) {
1429 // probability is less likely
1430 ok_prob = PROB_LIKELY_MAG(3);
1431 } else if (!assert_null &&
1432 (ImplicitNullCheckThreshold > 0) &&
1433 method() != nullptr &&
1434 (method()->method_data()->trap_count(reason)
1468 }
1469
1470 if (assert_null) {
1471 // Cast obj to null on this path.
1472 replace_in_map(value, zerocon(type));
1473 return zerocon(type);
1474 }
1475
1476 // Cast obj to not-null on this path, if there is no null_control.
1477 // (If there is a null_control, a non-null value may come back to haunt us.)
1478 if (type == T_OBJECT) {
1479 Node* cast = cast_not_null(value, false);
1480 if (null_control == nullptr || (*null_control) == top())
1481 replace_in_map(value, cast);
1482 value = cast;
1483 }
1484
1485 return value;
1486 }
1487
1488 //------------------------------cast_not_null----------------------------------
1489 // Cast obj to not-null on this path
1490 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1491 if (obj->is_InlineType()) {
1492 Node* vt = obj->isa_InlineType()->clone_if_required(&gvn(), map(), do_replace_in_map);
1493 vt->as_InlineType()->set_is_init(_gvn);
1494 vt = _gvn.transform(vt);
1495 if (do_replace_in_map) {
1496 replace_in_map(obj, vt);
1497 }
1498 return vt;
1499 }
1500 const Type *t = _gvn.type(obj);
1501 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1502 // Object is already not-null?
1503 if( t == t_not_null ) return obj;
1504
1505 Node* cast = new CastPPNode(control(), obj,t_not_null);
1506 cast = _gvn.transform( cast );
1507
1508 // Scan for instances of 'obj' in the current JVM mapping.
1509 // These instances are known to be not-null after the test.
1510 if (do_replace_in_map)
1511 replace_in_map(obj, cast);
1512
1513 return cast; // Return casted value
1514 }
1515
1516 // Sometimes in intrinsics, we implicitly know an object is not null
1517 // (there's no actual null check) so we can cast it to not null. In
1518 // the course of optimizations, the input to the cast can become null.
1519 // In that case that data path will die and we need the control path
1608 // These are layered on top of the factory methods in LoadNode and StoreNode,
1609 // and integrate with the parser's memory state and _gvn engine.
1610 //
1611
1612 // factory methods in "int adr_idx"
1613 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1614 int adr_idx,
1615 MemNode::MemOrd mo,
1616 LoadNode::ControlDependency control_dependency,
1617 bool require_atomic_access,
1618 bool unaligned,
1619 bool mismatched,
1620 bool unsafe,
1621 uint8_t barrier_data) {
1622 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1623 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1624 debug_only(adr_type = C->get_adr_type(adr_idx));
1625 Node* mem = memory(adr_idx);
1626 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1627 ld = _gvn.transform(ld);
1628
1629 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1630 // Improve graph before escape analysis and boxing elimination.
1631 record_for_igvn(ld);
1632 if (ld->is_DecodeN()) {
1633 // Also record the actual load (LoadN) in case ld is DecodeN. In some
1634 // rare corner cases, ld->in(1) can be something other than LoadN (e.g.,
1635 // a Phi). Recording such cases is still perfectly sound, but may be
1636 // unnecessary and result in some minor IGVN overhead.
1637 record_for_igvn(ld->in(1));
1638 }
1639 }
1640 return ld;
1641 }
1642
1643 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1644 int adr_idx,
1645 MemNode::MemOrd mo,
1646 bool require_atomic_access,
1647 bool unaligned,
1648 bool mismatched,
1662 if (unsafe) {
1663 st->as_Store()->set_unsafe_access();
1664 }
1665 st->as_Store()->set_barrier_data(barrier_data);
1666 st = _gvn.transform(st);
1667 set_memory(st, adr_idx);
1668 // Back-to-back stores can only remove intermediate store with DU info
1669 // so push on worklist for optimizer.
1670 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1671 record_for_igvn(st);
1672
1673 return st;
1674 }
1675
1676 Node* GraphKit::access_store_at(Node* obj,
1677 Node* adr,
1678 const TypePtr* adr_type,
1679 Node* val,
1680 const Type* val_type,
1681 BasicType bt,
1682 DecoratorSet decorators,
1683 bool safe_for_replace) {
1684   // Transformation of a value which could be a null pointer (CastPP #null)
1685   // could be delayed during Parse (for example, in adjust_map_after_if()).
1686   // Execute the transformation here to avoid barrier generation in such a case.
1687 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1688 val = _gvn.makecon(TypePtr::NULL_PTR);
1689 }
1690
1691 if (stopped()) {
1692 return top(); // Dead path ?
1693 }
1694
1695 assert(val != nullptr, "not dead path");
1696 if (val->is_InlineType()) {
1697 // Store to non-flat field. Buffer the inline type and make sure
1698 // the store is re-executed if the allocation triggers deoptimization.
1699 PreserveReexecuteState preexecs(this);
1700 jvms()->set_should_reexecute(true);
1701 val = val->as_InlineType()->buffer(this, safe_for_replace);
1702 }
1703
1704 C2AccessValuePtr addr(adr, adr_type);
1705 C2AccessValue value(val, val_type);
1706 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1707 if (access.is_raw()) {
1708 return _barrier_set->BarrierSetC2::store_at(access, value);
1709 } else {
1710 return _barrier_set->store_at(access, value);
1711 }
1712 }
1713
1714 Node* GraphKit::access_load_at(Node* obj, // containing obj
1715                                 Node* adr,   // actual address to load val from
1716 const TypePtr* adr_type,
1717 const Type* val_type,
1718 BasicType bt,
1719 DecoratorSet decorators,
1720 Node* ctl) {
1721 if (stopped()) {
1722 return top(); // Dead path ?
1723 }
1724
1725 C2AccessValuePtr addr(adr, adr_type);
1726 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1727 if (access.is_raw()) {
1728 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1729 } else {
1730 return _barrier_set->load_at(access, val_type);
1731 }
1732 }
1733
1734 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1735 const Type* val_type,
1736 BasicType bt,
1737 DecoratorSet decorators) {
1738 if (stopped()) {
1739 return top(); // Dead path ?
1740 }
1741
1742 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1743 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1744 if (access.is_raw()) {
1745 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1746 } else {
1811 Node* new_val,
1812 const Type* value_type,
1813 BasicType bt,
1814 DecoratorSet decorators) {
1815 C2AccessValuePtr addr(adr, adr_type);
1816 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1817 if (access.is_raw()) {
1818 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1819 } else {
1820 return _barrier_set->atomic_add_at(access, new_val, value_type);
1821 }
1822 }
1823
1824 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1825 return _barrier_set->clone(this, src, dst, size, is_array);
1826 }
1827
1828 //-------------------------array_element_address-------------------------
1829 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1830 const TypeInt* sizetype, Node* ctrl) {
1831 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
1832 uint shift = arytype->is_flat() ? arytype->flat_log_elem_size() : exact_log2(type2aelembytes(elembt));
1833 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1834
1835 // short-circuit a common case (saves lots of confusing waste motion)
1836 jint idx_con = find_int_con(idx, -1);
1837 if (idx_con >= 0) {
1838 intptr_t offset = header + ((intptr_t)idx_con << shift);
1839 return basic_plus_adr(ary, offset);
1840 }
1841
1842 // must be correct type for alignment purposes
1843 Node* base = basic_plus_adr(ary, header);
1844 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1845 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1846 return basic_plus_adr(ary, base, scale);
1847 }
1848
1849 //-------------------------load_array_element-------------------------
1850 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1851 const Type* elemtype = arytype->elem();
1852 BasicType elembt = elemtype->array_element_basic_type();
1853 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1854 if (elembt == T_NARROWOOP) {
1855 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1856 }
1857 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1858 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1859 return ld;
1860 }
1861
1862 //-------------------------set_arguments_for_java_call-------------------------
1863 // Arguments (pre-popped from the stack) are taken from the JVMS.
1864 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1865 PreserveReexecuteState preexecs(this);
1866 if (EnableValhalla) {
1867 // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
1868 // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
1869 jvms()->set_should_reexecute(true);
1870 int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1871 inc_sp(arg_size);
1872 }
1873 // Add the call arguments
1874 const TypeTuple* domain = call->tf()->domain_sig();
1875 uint nargs = domain->cnt();
1876 int arg_num = 0;
1877 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1878 Node* arg = argument(i-TypeFunc::Parms);
1879 const Type* t = domain->field_at(i);
1880 // TODO 8284443 A static call to a mismatched method should still be scalarized
1881 if (t->is_inlinetypeptr() && !call->method()->get_Method()->mismatch() && call->method()->is_scalarized_arg(arg_num)) {
1882 // We don't pass inline type arguments by reference but instead pass each field of the inline type
1883 if (!arg->is_InlineType()) {
1884 assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
1885 arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
1886 }
1887 InlineTypeNode* vt = arg->as_InlineType();
1888 vt->pass_fields(this, call, idx, true, !t->maybe_null());
1889 // If an inline type argument is passed as fields, attach the Method* to the call site
1890 // to be able to access the extended signature later via attached_method_before_pc().
1891 // For example, see CompiledMethod::preserve_callee_argument_oops().
1892 call->set_override_symbolic_info(true);
1893 // Register an evol dependency on the callee method to make sure that this method is deoptimized and
1894 // re-compiled with a non-scalarized calling convention if the callee method is later marked as mismatched.
1895 C->dependencies()->assert_evol_method(call->method());
1896 arg_num++;
1897 continue;
1898 } else if (arg->is_InlineType()) {
1899 // Pass inline type argument via oop to callee
1900 InlineTypeNode* inline_type = arg->as_InlineType();
1901 const ciMethod* method = call->method();
1902 ciInstanceKlass* holder = method->holder();
1903 const bool is_receiver = (i == TypeFunc::Parms);
1904 const bool is_abstract_or_object_klass_constructor = method->is_object_constructor() &&
1905 (holder->is_abstract() || holder->is_java_lang_Object());
1906 const bool is_larval_receiver_on_super_constructor = is_receiver && is_abstract_or_object_klass_constructor;
1907 bool must_init_buffer = true;
1908 // We always need to buffer inline types when they are escaping. However, we can skip the actual initialization
1909       // of the buffer if the inline type is a larval because we are going to update the buffer anyway, which requires
1910       // us to create a new one. But there is one special case where we are still required to initialize the buffer:
1911       // when we invoke an abstract (value class) constructor or the Object constructor (that is not going to be
1912       // inlined) on a larval receiver. After this call, the larval is completely initialized and thus not a larval anymore.
1913 // We therefore need to force an initialization of the buffer to not lose all the field writes so far in case the
1914 // buffer needs to be used (e.g. to read from when deoptimizing at runtime) or further updated in abstract super
1915 // value class constructors which could have more fields to be initialized. Note that we do not need to
1916 // initialize the buffer when invoking another constructor in the same class on a larval receiver because we
1917 // have not initialized any fields, yet (this is done completely by the other constructor call).
1918 if (inline_type->is_larval() && !is_larval_receiver_on_super_constructor) {
1919 must_init_buffer = false;
1920 }
1921 arg = inline_type->buffer(this, true, must_init_buffer);
1922 }
1923 if (t != Type::HALF) {
1924 arg_num++;
1925 }
1926 call->init_req(idx++, arg);
1927 }
1928 }
1929
1930 //---------------------------set_edges_for_java_call---------------------------
1931 // Connect a newly created call into the current JVMS.
1932 // A return value node (if any) is returned from set_edges_for_java_call.
1933 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1934
1935 // Add the predefined inputs:
1936 call->init_req( TypeFunc::Control, control() );
1937 call->init_req( TypeFunc::I_O , i_o() );
1938 call->init_req( TypeFunc::Memory , reset_memory() );
1939 call->init_req( TypeFunc::FramePtr, frameptr() );
1940 call->init_req( TypeFunc::ReturnAdr, top() );
1941
1942 add_safepoint_edges(call, must_throw);
1943
1944 Node* xcall = _gvn.transform(call);
1945
1946 if (xcall == top()) {
1947 set_control(top());
1948 return;
1949 }
1950 assert(xcall == call, "call identity is stable");
1951
1952 // Re-use the current map to produce the result.
1953
1954 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1955 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1956 set_all_memory_call(xcall, separate_io_proj);
1957
1958 //return xcall; // no need, caller already has it
1959 }
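// A typical caller pairs this with set_results_for_java_call() below, along the lines of
// (illustrative, using the signatures defined in this file):
//
//   set_edges_for_java_call(call, must_throw, separate_io_proj);
//   Node* ret = set_results_for_java_call(call, separate_io_proj);
//
// so the call is first wired into the JVMS and the result/exception projections are then
// extracted from it.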
1960
1961 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1962 if (stopped()) return top(); // maybe the call folded up?
1963
1964 // Note: Since any out-of-line call can produce an exception,
1965 // we always insert an I_O projection from the call into the result.
1966
1967 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1968
1969 if (separate_io_proj) {
1970 // The caller requested separate projections be used by the fall
1971 // through and exceptional paths, so replace the projections for
1972 // the fall through path.
1973 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1974 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1975 }
1976
1977 // Capture the return value, if any.
1978 Node* ret;
1979 if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) {
1980 ret = top();
1981 } else if (call->tf()->returns_inline_type_as_fields()) {
1982     // Return of multiple values (inline type fields): we create an
1983     // InlineType node; each field is a projection from the call.
1984 ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
1985 uint base_input = TypeFunc::Parms;
1986 ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, false);
1987 } else {
1988 ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1989 ciType* t = call->method()->return_type();
1990 if (t->is_klass()) {
1991 const Type* type = TypeOopPtr::make_from_klass(t->as_klass());
1992 if (type->is_inlinetypeptr()) {
1993 ret = InlineTypeNode::make_from_oop(this, ret, type->inline_klass(), type->inline_klass()->is_null_free());
1994 }
1995 }
1996 }
1997
1998   // If we just called a constructor on a value type (larval) receiver, reload it from the buffer
1999 ciMethod* method = call->method();
2000 if (method->is_object_constructor() && !method->holder()->is_java_lang_Object()) {
2001 InlineTypeNode* inline_type_receiver = call->in(TypeFunc::Parms)->isa_InlineType();
2002 if (inline_type_receiver != nullptr) {
2003 assert(inline_type_receiver->is_larval(), "must be larval");
2004 assert(inline_type_receiver->is_allocated(&gvn()), "larval must be buffered");
2005 InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, inline_type_receiver->get_oop(),
2006 inline_type_receiver->bottom_type()->inline_klass(), true);
2007 assert(!reloaded->is_larval(), "should not be larval anymore");
2008 replace_in_map(inline_type_receiver, reloaded);
2009 }
2010 }
2011
2012 return ret;
2013 }
2014
2015 //--------------------set_predefined_input_for_runtime_call--------------------
2016 // Reading and setting the memory state is way conservative here.
2017 // The real problem is that I am not doing real Type analysis on memory,
2018 // so I cannot distinguish card mark stores from other stores. Across a GC
2019 // point the Store Barrier and the card mark memory has to agree. I cannot
2020 // have a card mark store and its barrier split across the GC point from
2021 // either above or below. Here I get that to happen by reading ALL of memory.
2022 // A better answer would be to separate out card marks from other memory.
2023 // For now, return the input memory state, so that it can be reused
2024 // after the call, if this call has restricted memory effects.
2025 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
2026 // Set fixed predefined input arguments
2027 Node* memory = reset_memory();
2028 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
2029 call->init_req( TypeFunc::Control, control() );
2030 call->init_req( TypeFunc::I_O, top() ); // does no i/o
2031 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
2082 if (use->is_MergeMem()) {
2083 wl.push(use);
2084 }
2085 }
2086 }
2087
2088 // Replace the call with the current state of the kit.
2089 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
2090 JVMState* ejvms = nullptr;
2091 if (has_exceptions()) {
2092 ejvms = transfer_exceptions_into_jvms();
2093 }
2094
2095 ReplacedNodes replaced_nodes = map()->replaced_nodes();
2096 ReplacedNodes replaced_nodes_exception;
2097 Node* ex_ctl = top();
2098
2099 SafePointNode* final_state = stop();
2100
2101 // Find all the needed outputs of this call
2102 CallProjections* callprojs = call->extract_projections(true);
2103
2104 Unique_Node_List wl;
2105 Node* init_mem = call->in(TypeFunc::Memory);
2106 Node* final_mem = final_state->in(TypeFunc::Memory);
2107 Node* final_ctl = final_state->in(TypeFunc::Control);
2108 Node* final_io = final_state->in(TypeFunc::I_O);
2109
2110 // Replace all the old call edges with the edges from the inlining result
2111 if (callprojs->fallthrough_catchproj != nullptr) {
2112 C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2113 }
2114 if (callprojs->fallthrough_memproj != nullptr) {
2115 if (final_mem->is_MergeMem()) {
2116       // The parser's exit MergeMem was not transformed but may be optimized
2117 final_mem = _gvn.transform(final_mem);
2118 }
2119 C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
2120 add_mergemem_users_to_worklist(wl, final_mem);
2121 }
2122 if (callprojs->fallthrough_ioproj != nullptr) {
2123 C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
2124 }
2125
2126 // Replace the result with the new result if it exists and is used
2127 if (callprojs->resproj[0] != nullptr && result != nullptr) {
2128 // If the inlined code is dead, the result projections for an inline type returned as
2129 // fields have not been replaced. They will go away once the call is replaced by TOP below.
2130 assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
2131 "unexpected number of results");
2132 C->gvn_replace_by(callprojs->resproj[0], result);
2133 }
2134
2135 if (ejvms == nullptr) {
2136     // No exception edges, so simply kill off those paths
2137 if (callprojs->catchall_catchproj != nullptr) {
2138 C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2139 }
2140 if (callprojs->catchall_memproj != nullptr) {
2141 C->gvn_replace_by(callprojs->catchall_memproj, C->top());
2142 }
2143 if (callprojs->catchall_ioproj != nullptr) {
2144 C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
2145 }
2146 // Replace the old exception object with top
2147 if (callprojs->exobj != nullptr) {
2148 C->gvn_replace_by(callprojs->exobj, C->top());
2149 }
2150 } else {
2151 GraphKit ekit(ejvms);
2152
2153 // Load my combined exception state into the kit, with all phis transformed:
2154 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2155 replaced_nodes_exception = ex_map->replaced_nodes();
2156
2157 Node* ex_oop = ekit.use_exception_state(ex_map);
2158
2159 if (callprojs->catchall_catchproj != nullptr) {
2160 C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2161 ex_ctl = ekit.control();
2162 }
2163 if (callprojs->catchall_memproj != nullptr) {
2164 Node* ex_mem = ekit.reset_memory();
2165 C->gvn_replace_by(callprojs->catchall_memproj, ex_mem);
2166 add_mergemem_users_to_worklist(wl, ex_mem);
2167 }
2168 if (callprojs->catchall_ioproj != nullptr) {
2169 C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
2170 }
2171
2172 // Replace the old exception object with the newly created one
2173 if (callprojs->exobj != nullptr) {
2174 C->gvn_replace_by(callprojs->exobj, ex_oop);
2175 }
2176 }
2177
2178 // Disconnect the call from the graph
2179 call->disconnect_inputs(C);
2180 C->gvn_replace_by(call, C->top());
2181
2182 // Clean up any MergeMems that feed other MergeMems since the
2183 // optimizer doesn't like that.
2184 while (wl.size() > 0) {
2185 _gvn.transform(wl.pop());
2186 }
2187
2188 if (callprojs->fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2189 replaced_nodes.apply(C, final_ctl);
2190 }
2191 if (!ex_ctl->is_top() && do_replaced_nodes) {
2192 replaced_nodes_exception.apply(C, ex_ctl);
2193 }
2194 }
2195
2196
2197 //------------------------------increment_counter------------------------------
2198 // for statistics: increment a VM counter by 1
2199
2200 void GraphKit::increment_counter(address counter_addr) {
2201 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2202 increment_counter(adr1);
2203 }
2204
2205 void GraphKit::increment_counter(Node* counter_addr) {
2206 int adr_type = Compile::AliasIdxRaw;
2207 Node* ctrl = control();
2208 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
2367 *
2368 * @param n node that the type applies to
2369 * @param exact_kls type from profiling
2370  * @param ptr_kind   did profiling see null?
2371 *
2372 * @return node with improved type
2373 */
2374 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2375 const Type* current_type = _gvn.type(n);
2376 assert(UseTypeSpeculation, "type speculation must be on");
2377
2378 const TypePtr* speculative = current_type->speculative();
2379
2380 // Should the klass from the profile be recorded in the speculative type?
2381 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2382 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2383 const TypeOopPtr* xtype = tklass->as_instance_type();
2384 assert(xtype->klass_is_exact(), "Should be exact");
2385 // Any reason to believe n is not null (from this profiling or a previous one)?
2386 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2387 const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2388 // record the new speculative type's depth
2389 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2390 speculative = speculative->with_inline_depth(jvms()->depth());
2391 } else if (current_type->would_improve_ptr(ptr_kind)) {
2392     // Profiling reports that null was never seen, so we can change the
2393     // speculative type to a non-null ptr.
2394 if (ptr_kind == ProfileAlwaysNull) {
2395 speculative = TypePtr::NULL_PTR;
2396 } else {
2397 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2398 const TypePtr* ptr = TypePtr::NOTNULL;
2399 if (speculative != nullptr) {
2400 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2401 } else {
2402 speculative = ptr;
2403 }
2404 }
2405 }
2406
2407 if (speculative != current_type->speculative()) {
2408 // Build a type with a speculative type (what we think we know
2409 // about the type but will need a guard when we use it)
2410 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
2411     // We're changing the type, so we need a new CheckCast node to carry
2412     // the new type. The new type depends on the control: what
2413     // profiling tells us is only valid from here on, as far as we
2414     // can tell.
2415 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2416 cast = _gvn.transform(cast);
2417 replace_in_map(n, cast);
2418 n = cast;
2419 }
2420
2421 return n;
2422 }
2423
2424 /**
2425 * Record profiling data from receiver profiling at an invoke with the
2426 * type system so that it can propagate it (speculation)
2427 *
2428 * @param n receiver node
2429 *
2430 * @return node with improved type
2431 */
2432 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2433 if (!UseTypeSpeculation) {
2434 return n;
2435 }
2436 ciKlass* exact_kls = profile_has_unique_klass();
2437 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2438 if ((java_bc() == Bytecodes::_checkcast ||
2439 java_bc() == Bytecodes::_instanceof ||
2440 java_bc() == Bytecodes::_aastore) &&
2441 method()->method_data()->is_mature()) {
2442 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2443 if (data != nullptr) {
2444 if (java_bc() == Bytecodes::_aastore) {
2445 ciKlass* array_type = nullptr;
2446 ciKlass* element_type = nullptr;
2447 ProfilePtrKind element_ptr = ProfileMaybeNull;
2448 bool flat_array = true;
2449 bool null_free_array = true;
2450 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2451 exact_kls = element_type;
2452 ptr_kind = element_ptr;
2453 } else {
2454 if (!data->as_BitData()->null_seen()) {
2455 ptr_kind = ProfileNeverNull;
2456 } else {
2457 assert(data->is_ReceiverTypeData(), "bad profile data type");
2458 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2459 uint i = 0;
2460 for (; i < call->row_limit(); i++) {
2461 ciKlass* receiver = call->receiver(i);
2462 if (receiver != nullptr) {
2463 break;
2464 }
2465 }
2466 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2467 }
2468 }
2469 }
2470 }
2471 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2472 }
2473
2474 /**
2475 * Record profiling data from argument profiling at an invoke with the
2476 * type system so that it can propagate it (speculation)
2477 *
2478 * @param dest_method target method for the call
2479 * @param bc what invoke bytecode is this?
2480 */
2481 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2482 if (!UseTypeSpeculation) {
2483 return;
2484 }
2485 const TypeFunc* tf = TypeFunc::make(dest_method);
2486 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2487 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
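  // Note: 'j' walks the signature positions (skipping the receiver slot if present) while
  // 'i' counts only the reference-typed arguments that carry profile data, bounded by
  // TypeProfileArgsLimit.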
2488 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2489 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2490 if (is_reference_type(targ->basic_type())) {
2491 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2492 ciKlass* better_type = nullptr;
2493 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2494 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2495 }
2496 i++;
2497 }
2498 }
2499 }
2500
2501 /**
2502 * Record profiling data from parameter profiling at an invoke with
2503 * the type system so that it can propagate it (speculation)
2504 */
2505 void GraphKit::record_profiled_parameters_for_speculation() {
2506 if (!UseTypeSpeculation) {
2507 return;
2508 }
2509 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2523 * the type system so that it can propagate it (speculation)
2524 */
2525 void GraphKit::record_profiled_return_for_speculation() {
2526 if (!UseTypeSpeculation) {
2527 return;
2528 }
2529 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2530 ciKlass* better_type = nullptr;
2531 if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2532 // If profiling reports a single type for the return value,
2533 // feed it to the type system so it can propagate it as a
2534 // speculative type
2535 record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2536 }
2537 }
2538
2539 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2540 if (Matcher::strict_fp_requires_explicit_rounding) {
2541 // (Note: TypeFunc::make has a cache that makes this fast.)
2542 const TypeFunc* tf = TypeFunc::make(dest_method);
2543 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2544 for (int j = 0; j < nargs; j++) {
2545 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2546 if (targ->basic_type() == T_DOUBLE) {
2547       // If any parameters are doubles, they must be rounded before
2548       // the call; dprecision_rounding does the gvn.transform.
2549 Node *arg = argument(j);
2550 arg = dprecision_rounding(arg);
2551 set_argument(j, arg);
2552 }
2553 }
2554 }
2555 }
2556
2557 // rounding for strict float precision conformance
2558 Node* GraphKit::precision_rounding(Node* n) {
2559 if (Matcher::strict_fp_requires_explicit_rounding) {
2560 #ifdef IA32
2561 if (UseSSE == 0) {
2562 return _gvn.transform(new RoundFloatNode(nullptr, n));
2563 }
2564 #else
2565 Unimplemented();
2674 // The first null ends the list.
2675 Node* parm0, Node* parm1,
2676 Node* parm2, Node* parm3,
2677 Node* parm4, Node* parm5,
2678 Node* parm6, Node* parm7) {
2679 assert(call_addr != nullptr, "must not call null targets");
2680
2681 // Slow-path call
2682 bool is_leaf = !(flags & RC_NO_LEAF);
2683 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2684 if (call_name == nullptr) {
2685 assert(!is_leaf, "must supply name for leaf");
2686 call_name = OptoRuntime::stub_name(call_addr);
2687 }
2688 CallNode* call;
2689 if (!is_leaf) {
2690 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2691 } else if (flags & RC_NO_FP) {
2692 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2693 } else if (flags & RC_VECTOR){
2694 uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2695 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2696 } else {
2697 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2698 }
2699
2700 // The following is similar to set_edges_for_java_call,
2701 // except that the memory effects of the call are restricted to AliasIdxRaw.
2702
2703 // Slow path call has no side-effects, uses few values
2704 bool wide_in = !(flags & RC_NARROW_MEM);
2705 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2706
2707 Node* prev_mem = nullptr;
2708 if (wide_in) {
2709 prev_mem = set_predefined_input_for_runtime_call(call);
2710 } else {
2711 assert(!wide_out, "narrow in => narrow out");
2712 Node* narrow_mem = memory(adr_type);
2713 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2714 }
2754
2755 if (has_io) {
2756 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2757 }
2758 return call;
2759
2760 }
2761
2762 // i2b
2763 Node* GraphKit::sign_extend_byte(Node* in) {
2764 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2765 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2766 }
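// For example, sign_extend_byte(0x00000080) yields 0xFFFFFF80 (-128): the left shift by 24
// moves the byte's sign bit into bit 31, and the arithmetic right shift by 24 then replicates
// it. sign_extend_short below is analogous with a shift distance of 16.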
2767
2768 // i2s
2769 Node* GraphKit::sign_extend_short(Node* in) {
2770 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2771 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2772 }
2773
2774
2775 //------------------------------merge_memory-----------------------------------
2776 // Merge memory from one path into the current memory state.
2777 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2778 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2779 Node* old_slice = mms.force_memory();
2780 Node* new_slice = mms.memory2();
2781 if (old_slice != new_slice) {
2782 PhiNode* phi;
2783 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2784 if (mms.is_empty()) {
2785 // clone base memory Phi's inputs for this memory slice
2786 assert(old_slice == mms.base_memory(), "sanity");
2787 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2788 _gvn.set_type(phi, Type::MEMORY);
2789 for (uint i = 1; i < phi->req(); i++) {
2790 phi->init_req(i, old_slice->in(i));
2791 }
2792 } else {
2793 phi = old_slice->as_Phi(); // Phi was generated already
2794 }
3057
3058 // Now do a linear scan of the secondary super-klass array. Again, no real
3059 // performance impact (too rare) but it's gotta be done.
3060 // Since the code is rarely used, there is no penalty for moving it
3061 // out of line, and it can only improve I-cache density.
3062 // The decision to inline or out-of-line this final check is platform
3063 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
3064 Node* psc = gvn.transform(
3065 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
3066
3067 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
3068 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
3069 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
3070
3071 // Return false path; set default control to true path.
3072 *ctrl = gvn.transform(r_ok_subtype);
3073 return gvn.transform(r_not_subtype);
3074 }
3075
3076 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
3077 const Type* sub_t = _gvn.type(obj_or_subklass);
3078 if (sub_t->make_oopptr() != nullptr && sub_t->make_oopptr()->is_inlinetypeptr()) {
3079 sub_t = TypeKlassPtr::make(sub_t->inline_klass());
3080 obj_or_subklass = makecon(sub_t);
3081 }
3082 bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
3083 if (expand_subtype_check) {
3084 MergeMemNode* mem = merged_memory();
3085 Node* ctrl = control();
3086 Node* subklass = obj_or_subklass;
3087 if (!sub_t->isa_klassptr()) {
3088 subklass = load_object_klass(obj_or_subklass);
3089 }
3090
3091 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
3092 set_control(ctrl);
3093 return n;
3094 }
3095
3096 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
3097 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
3098 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3099 set_control(_gvn.transform(new IfTrueNode(iff)));
3100 return _gvn.transform(new IfFalseNode(iff));
3101 }
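// Note on the convention used above: the returned node is the control path on which the
// subtype check fails, while control() is left on the path on which it succeeds. Callers
// typically attach an uncommon trap or an exception throw to the returned path (see
// gen_checkcast below).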
3102
3103 // Profile-driven exact type check:
3104 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
3105 float prob, Node* *casted_receiver) {
3106 assert(!klass->is_interface(), "no exact type check on interfaces");
3107 Node* fail = top();
3108 const Type* rec_t = _gvn.type(receiver);
3109 if (rec_t->is_inlinetypeptr()) {
3110 if (klass->equals(rec_t->inline_klass())) {
3111 (*casted_receiver) = receiver; // Always passes
3112 } else {
3113 (*casted_receiver) = top(); // Always fails
3114 fail = control();
3115 set_control(top());
3116 }
3117 return fail;
3118 }
3119 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
3120 Node* recv_klass = load_object_klass(receiver);
3121 fail = type_check(recv_klass, tklass, prob);
3122
3123 if (!stopped()) {
3124 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3125 const TypeOopPtr* recv_xtype = tklass->as_instance_type();
3126 assert(recv_xtype->klass_is_exact(), "");
3127
3128 if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
3129 // Subsume downstream occurrences of receiver with a cast to
3130 // recv_xtype, since now we know what the type will be.
3131 Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
3132 Node* res = _gvn.transform(cast);
3133 if (recv_xtype->is_inlinetypeptr()) {
3134 assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
3135 res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass());
3136 }
3137 (*casted_receiver) = res;
3138 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
3139 // (User must make the replace_in_map call.)
3140 }
3141 }
3142
3143 return fail;
3144 }
3145
3146 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
3147 float prob) {
3148 Node* want_klass = makecon(tklass);
3149 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
3150 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3151 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
3152 set_control(_gvn.transform(new IfTrueNode (iff)));
3153 Node* fail = _gvn.transform(new IfFalseNode(iff));
3154 return fail;
3155 }
3156
3157 //------------------------------subtype_check_receiver-------------------------
3158 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3159 Node** casted_receiver) {
3160 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
3161 Node* want_klass = makecon(tklass);
3162
3163 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3164
3165 // Ignore interface type information until interface types are properly tracked.
3166 if (!stopped() && !klass->is_interface()) {
3167 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3168 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3169 if (receiver_type != nullptr && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3170 Node* cast = _gvn.transform(new CheckCastPPNode(control(), receiver, recv_type));
3171 if (recv_type->is_inlinetypeptr()) {
3172 cast = InlineTypeNode::make_from_oop(this, cast, recv_type->inline_klass());
3173 }
3174 (*casted_receiver) = cast;
3175 }
3176 }
3177
3178 return slow_ctl;
3179 }
3180
3181 //------------------------------seems_never_null-------------------------------
3182 // Use null_seen information if it is available from the profile.
3183 // If we see an unexpected null at a type check we record it and force a
3184 // recompile; the offending check will be recompiled to handle nulls.
3185 // If we see several offending BCIs, then all checks in the
3186 // method will be recompiled.
3187 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3188 speculating = !_gvn.type(obj)->speculative_maybe_null();
3189 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3190 if (UncommonNullCast // Cutout for this technique
3191 && obj != null() // And not the -Xcomp stupid case?
3192 && !too_many_traps(reason)
3193 ) {
3194 if (speculating) {
3263
3264 //------------------------maybe_cast_profiled_receiver-------------------------
3265 // If the profile has seen exactly one type, narrow to exactly that type.
3266 // Subsequent type checks will always fold up.
3267 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3268 const TypeKlassPtr* require_klass,
3269 ciKlass* spec_klass,
3270 bool safe_for_replace) {
3271 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3272
3273 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3274
3275 // Make sure we haven't already deoptimized from this tactic.
3276 if (too_many_traps_or_recompiles(reason))
3277 return nullptr;
3278
3279 // (No, this isn't a call, but it's enough like a virtual call
3280 // to use the same ciMethod accessor to get the profile info...)
3281 // If we have a speculative type use it instead of profiling (which
3282 // may not help us)
3283 ciKlass* exact_kls = spec_klass;
3284 if (exact_kls == nullptr) {
3285 if (java_bc() == Bytecodes::_aastore) {
3286 ciKlass* array_type = nullptr;
3287 ciKlass* element_type = nullptr;
3288 ProfilePtrKind element_ptr = ProfileMaybeNull;
3289 bool flat_array = true;
3290 bool null_free_array = true;
3291 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3292 exact_kls = element_type;
3293 } else {
3294 exact_kls = profile_has_unique_klass();
3295 }
3296 }
3297 if (exact_kls != nullptr) {// no cast failures here
3298 if (require_klass == nullptr ||
3299 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3300 // If we narrow the type to match what the type profile sees or
3301 // the speculative type, we can then remove the rest of the
3302 // cast.
3303 // This is a win, even if the exact_kls is very specific,
3304 // because downstream operations, such as method calls,
3305 // will often benefit from the sharper type.
3306 Node* exact_obj = not_null_obj; // will get updated in place...
3307 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3308 &exact_obj);
3309 { PreserveJVMState pjvms(this);
3310 set_control(slow_ctl);
3311 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3312 }
3313 if (safe_for_replace) {
3314 replace_in_map(not_null_obj, exact_obj);
3315 }
3316 return exact_obj;
3406 // If not_null_obj is dead, only null-path is taken
3407 if (stopped()) { // Doing instance-of on a null?
3408 set_control(null_ctl);
3409 return intcon(0);
3410 }
3411 region->init_req(_null_path, null_ctl);
3412 phi ->init_req(_null_path, intcon(0)); // Set null path value
3413 if (null_ctl == top()) {
3414 // Do this eagerly, so that pattern matches like is_diamond_phi
3415 // will work even during parsing.
3416 assert(_null_path == PATH_LIMIT-1, "delete last");
3417 region->del_req(_null_path);
3418 phi ->del_req(_null_path);
3419 }
3420
3421   // Do we know the type check always succeeds?
3422 bool known_statically = false;
3423 if (_gvn.type(superklass)->singleton()) {
3424 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3425 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3426 if (subk != nullptr && subk->is_loaded()) {
3427 int static_res = C->static_subtype_check(superk, subk);
3428 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3429 }
3430 }
3431
3432 if (!known_statically) {
3433 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3434 // We may not have profiling here or it may not help us. If we
3435 // have a speculative type use it to perform an exact cast.
3436 ciKlass* spec_obj_type = obj_type->speculative_type();
3437 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3438 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3439 if (stopped()) { // Profile disagrees with this path.
3440 set_control(null_ctl); // Null is the only remaining possibility.
3441 return intcon(0);
3442 }
3443 if (cast_obj != nullptr) {
3444 not_null_obj = cast_obj;
3445 }
3446 }
3462 record_for_igvn(region);
3463
3464 // If we know the type check always succeeds then we don't use the
3465 // profiling data at this bytecode. Don't lose it, feed it to the
3466 // type system as a speculative type.
3467 if (safe_for_replace) {
3468 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3469 replace_in_map(obj, casted_obj);
3470 }
3471
3472 return _gvn.transform(phi);
3473 }
3474
3475 //-------------------------------gen_checkcast---------------------------------
3476 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3477 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3478 // uncommon-trap paths work. Adjust stack after this call.
3479 // If failure_control is supplied and not null, it is filled in with
3480 // the control edge for the cast failure. Otherwise, an appropriate
3481 // uncommon trap or exception is thrown.
3482 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {
3483 kill_dead_locals(); // Benefit all the uncommon traps
3484 const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
3485 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
3486 const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
3487 bool safe_for_replace = (failure_control == nullptr);
3488 assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer");
3489
3490 // Fast cutout: Check the case that the cast is vacuously true.
3491 // This detects the common cases where the test will short-circuit
3492 // away completely. We do this before we perform the null check,
3493 // because if the test is going to turn into zero code, we don't
3494 // want a residual null check left around. (Causes a slowdown,
3495 // for example, in some objArray manipulations, such as a[i]=a[j].)
3496 if (improved_klass_ptr_type->singleton()) {
3497 const TypeKlassPtr* kptr = nullptr;
3498 const Type* t = _gvn.type(obj);
3499 if (t->isa_oop_ptr()) {
3500 kptr = t->is_oopptr()->as_klass_type();
3501 } else if (obj->is_InlineType()) {
3502 ciInlineKlass* vk = t->inline_klass();
3503 kptr = TypeInstKlassPtr::make(TypePtr::NotNull, vk, Type::Offset(0));
3504 }
3505 if (kptr != nullptr) {
3506 switch (C->static_subtype_check(improved_klass_ptr_type, kptr)) {
3507 case Compile::SSC_always_true:
3508         // If we know the type check always succeeds then we don't use
3509 // the profiling data at this bytecode. Don't lose it, feed it
3510 // to the type system as a speculative type.
3511 obj = record_profiled_receiver_for_speculation(obj);
3512 if (null_free) {
3513 assert(safe_for_replace, "must be");
3514 obj = null_check(obj);
3515 }
3516 assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineType(), "should have been scalarized");
3517 return obj;
3518 case Compile::SSC_always_false:
3519 if (null_free) {
3520 assert(safe_for_replace, "must be");
3521 obj = null_check(obj);
3522 }
3523 // It needs a null check because a null will *pass* the cast check.
3524 if (t->isa_oopptr() != nullptr && !t->is_oopptr()->maybe_null()) {
3525 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3526 Deoptimization::DeoptReason reason = is_aastore ?
3527 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3528 builtin_throw(reason);
3529 return top();
3530 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3531 return null_assert(obj);
3532 }
3533 break; // Fall through to full check
3534 default:
3535 break;
3536 }
3537 }
3538 }
3539
3540 ciProfileData* data = nullptr;
3541 if (failure_control == nullptr) { // use MDO in regular case only
3542 assert(java_bc() == Bytecodes::_aastore ||
3543 java_bc() == Bytecodes::_checkcast,
3544 "interpreter profiles type checks only for these BCs");
3545 if (method()->method_data()->is_mature()) {
3546 data = method()->method_data()->bci_to_data(bci());
3547 }
3548 }
3549
3550 // Make the merge point
3551 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3552 RegionNode* region = new RegionNode(PATH_LIMIT);
3553 Node* phi = new PhiNode(region, toop);
3554 _gvn.set_type(region, Type::CONTROL);
3555 _gvn.set_type(phi, toop);
3556
3557 C->set_has_split_ifs(true); // Has chance for split-if optimization
3558
3559 // Use null-cast information if it is available
3560 bool speculative_not_null = false;
3561 bool never_see_null = ((failure_control == nullptr) // regular case only
3562 && seems_never_null(obj, data, speculative_not_null));
3563
3564 if (obj->is_InlineType()) {
3565     // Re-execute if buffering triggers deoptimization
3566 PreserveReexecuteState preexecs(this);
3567 jvms()->set_should_reexecute(true);
3568 obj = obj->as_InlineType()->buffer(this, safe_for_replace);
3569 }
3570
3571   // Null check; get casted pointer; set region slot _null_path
3572 Node* null_ctl = top();
3573 Node* not_null_obj = nullptr;
3574 if (null_free) {
3575 assert(safe_for_replace, "must be");
3576 not_null_obj = null_check(obj);
3577 } else {
3578 not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3579 }
3580
3581 // If not_null_obj is dead, only null-path is taken
3582   if (stopped()) {              // Doing checkcast on a null?
3583 set_control(null_ctl);
3584 if (toop->is_inlinetypeptr()) {
3585 return InlineTypeNode::make_null(_gvn, toop->inline_klass());
3586 }
3587 return null();
3588 }
3589 region->init_req(_null_path, null_ctl);
3590 phi ->init_req(_null_path, null()); // Set null path value
3591 if (null_ctl == top()) {
3592 // Do this eagerly, so that pattern matches like is_diamond_phi
3593 // will work even during parsing.
3594 assert(_null_path == PATH_LIMIT-1, "delete last");
3595 region->del_req(_null_path);
3596 phi ->del_req(_null_path);
3597 }
3598
3599 Node* cast_obj = nullptr;
3600 if (improved_klass_ptr_type->klass_is_exact()) {
3601 // The following optimization tries to statically cast the speculative type of the object
3602 // (for example obtained during profiling) to the type of the superklass and then do a
3603 // dynamic check that the type of the object is what we expect. To work correctly
3604 // for checkcast and aastore the type of superklass should be exact.
3605 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3606 // We may not have profiling here or it may not help us. If we have
3607 // a speculative type use it to perform an exact cast.
3608 ciKlass* spec_obj_type = obj_type->speculative_type();
3609 if (spec_obj_type != nullptr || data != nullptr) {
3610 cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace);
3611 if (cast_obj != nullptr) {
3612 if (failure_control != nullptr) // failure is now impossible
3613 (*failure_control) = top();
3614 // adjust the type of the phi to the exact klass:
3615 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3616 }
3617 }
3618 }
3619
3620 if (cast_obj == nullptr) {
3621 // Generate the subtype check
3622 Node* improved_superklass = superklass;
3623 if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
3624 // Only improve the super class for constants which allows subsequent sub type checks to possibly be commoned up.
3625 // The other non-constant cases cannot be improved with a cast node here since they could be folded to top.
3626 // Additionally, the benefit would only be minor in non-constant cases.
3627 improved_superklass = makecon(improved_klass_ptr_type);
3628 }
3629 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);
3630 // Plug in success path into the merge
3631 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3632 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3633 if (failure_control == nullptr) {
3634 if (not_subtype_ctrl != top()) { // If failure is possible
3635 PreserveJVMState pjvms(this);
3636 set_control(not_subtype_ctrl);
3637 Node* obj_klass = nullptr;
3638 if (not_null_obj->is_InlineType()) {
3639 obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3640 } else {
3641 obj_klass = load_object_klass(not_null_obj);
3642 }
3643 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3644 Deoptimization::DeoptReason reason = is_aastore ?
3645 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3646 builtin_throw(reason);
3647 }
3648 } else {
3649 (*failure_control) = not_subtype_ctrl;
3650 }
3651 }
3652
3653 region->init_req(_obj_path, control());
3654 phi ->init_req(_obj_path, cast_obj);
3655
3656 // A merge of null or Casted-NotNull obj
3657 Node* res = _gvn.transform(phi);
3658
3659 // Note I do NOT always 'replace_in_map(obj,result)' here.
3660 // if( tk->klass()->can_be_primary_super() )
3661 // This means that if I successfully store an Object into an array-of-String
3662 // I 'forget' that the Object is really now known to be a String. I have to
3663 // do this because we don't have true union types for interfaces - if I store
3664 // a Baz into an array-of-Interface and then tell the optimizer it's an
3665 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3666 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3667 // replace_in_map( obj, res );
3668
3669 // Return final merged results
3670 set_control( _gvn.transform(region) );
3671 record_for_igvn(region);
3672
3673 bool not_inline = !toop->can_be_inline_type();
3674 bool not_flat_in_array = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flat_in_array());
3675 if (EnableValhalla && not_flat_in_array) {
3676 // Check if obj has been loaded from an array
3677 obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3678 Node* array = nullptr;
3679 if (obj->isa_Load()) {
3680 Node* address = obj->in(MemNode::Address);
3681 if (address->isa_AddP()) {
3682 array = address->as_AddP()->in(AddPNode::Base);
3683 }
3684 } else if (obj->is_Phi()) {
3685 Node* region = obj->in(0);
3686 // TODO make this more robust (see JDK-8231346)
3687 if (region->req() == 3 && region->in(2) != nullptr && region->in(2)->in(0) != nullptr) {
3688 IfNode* iff = region->in(2)->in(0)->isa_If();
3689 if (iff != nullptr) {
3690 iff->is_flat_array_check(&_gvn, &array);
3691 }
3692 }
3693 }
3694 if (array != nullptr) {
3695 const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3696 if (ary_t != nullptr && !ary_t->is_flat()) {
3697 if (!ary_t->is_not_null_free() && not_inline) {
3698 // Casting array element to a non-inline-type, mark array as not null-free.
3699 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3700 replace_in_map(array, cast);
3701 } else if (!ary_t->is_not_flat()) {
3702 // Casting array element to a non-flat type, mark array as not flat.
3703 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3704 replace_in_map(array, cast);
3705 }
3706 }
3707 }
3708 }
3709
3710 if (!stopped() && !res->is_InlineType()) {
3711 res = record_profiled_receiver_for_speculation(res);
3712 if (toop->is_inlinetypeptr()) {
3713 Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
3714 res = vt;
3715 if (safe_for_replace) {
3716 replace_in_map(obj, vt);
3717 replace_in_map(not_null_obj, vt);
3718 replace_in_map(res, vt);
3719 }
3720 }
3721 }
3722 return res;
3723 }
3724
3725 Node* GraphKit::mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock) {
3726 // Load markword
3727 Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3728 Node* mark = make_load(nullptr, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3729 if (check_lock) {
3730 // Check if obj is locked
3731 Node* locked_bit = MakeConX(markWord::unlocked_value);
3732 locked_bit = _gvn.transform(new AndXNode(locked_bit, mark));
3733 Node* cmp = _gvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
3734 Node* is_unlocked = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3735 IfNode* iff = new IfNode(control(), is_unlocked, PROB_MAX, COUNT_UNKNOWN);
3736 _gvn.transform(iff);
3737 Node* locked_region = new RegionNode(3);
3738 Node* mark_phi = new PhiNode(locked_region, TypeX_X);
3739
3740 // Unlocked: Use bits from mark word
3741 locked_region->init_req(1, _gvn.transform(new IfTrueNode(iff)));
3742 mark_phi->init_req(1, mark);
3743
3744 // Locked: Load prototype header from klass
3745 set_control(_gvn.transform(new IfFalseNode(iff)));
3746     // Make loads control dependent to make sure they are only executed if obj is locked
3747 Node* klass_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
3748 Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, control(), C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
3749 Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
3750 Node* proto = _gvn.transform(LoadNode::make(_gvn, control(), C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
3751
3752 locked_region->init_req(2, control());
3753 mark_phi->init_req(2, proto);
3754 set_control(_gvn.transform(locked_region));
3755 record_for_igvn(locked_region);
3756
3757 mark = mark_phi;
3758 }
3759
3760 // Now check if mark word bits are set
3761 Node* mask = MakeConX(mask_val);
3762 Node* masked = _gvn.transform(new AndXNode(_gvn.transform(mark), mask));
3763 record_for_igvn(masked); // Give it a chance to be optimized out by IGVN
3764 Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
3765 return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
3766 }
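// In other words, the Bool produced above is 'eq' exactly when all bits of mask_val are set,
// i.e. it tests (mark & mask_val) == mask_val. With check_lock, a locked object's mark word
// (which then holds lock state) is bypassed and the bits are read from the klass' prototype
// header instead.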
3767
3768 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
3769 return mark_word_test(obj, markWord::inline_type_pattern, is_inline, /* check_lock = */ false);
3770 }
3771
3772 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
3773 // We can't use immutable memory here because the mark word is mutable.
3774 // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
3775 // check is moved out of loops (mainly to enable loop unswitching).
3776 Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, memory(Compile::AliasIdxRaw), array_or_klass));
3777 record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
3778 return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
3779 }
3780
3781 Node* GraphKit::null_free_array_test(Node* array, bool null_free) {
3782 return mark_word_test(array, markWord::null_free_array_bit_in_place, null_free);
3783 }
3784
3785 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3786 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3787 RegionNode* region = new RegionNode(3);
3788 Node* null_ctl = top();
3789 null_check_oop(val, &null_ctl);
3790 if (null_ctl != top()) {
3791 PreserveJVMState pjvms(this);
3792 set_control(null_ctl);
3793 {
3794 // Deoptimize if null-free array
3795 BuildCutout unless(this, null_free_array_test(ary, /* null_free = */ false), PROB_MAX);
3796 inc_sp(nargs);
3797 uncommon_trap(Deoptimization::Reason_null_check,
3798 Deoptimization::Action_none);
3799 }
3800 region->init_req(1, control());
3801 }
3802 region->init_req(2, control());
3803 set_control(_gvn.transform(region));
3804 record_for_igvn(region);
3805 if (_gvn.type(val) == TypePtr::NULL_PTR) {
3806     // Since we just successfully stored null, the array can't be null-free.
3807 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3808 ary_t = ary_t->cast_to_not_null_free();
3809 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3810 if (safe_for_replace) {
3811 replace_in_map(ary, cast);
3812 }
3813 ary = cast;
3814 }
3815 return ary;
3816 }
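// Illustrative example of when this guard is needed (hypothetical code):
//
//   Object[] a = ...;   // may be a null-free (null-restricted) value class array at runtime
//   a[i] = v;           // v may be null
//
// If 'a' turns out to be null-free and 'v' is null, the store must deoptimize instead of
// writing null into the array.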
3817
3818 //------------------------------next_monitor-----------------------------------
3819 // What number should be given to the next monitor?
3820 int GraphKit::next_monitor() {
3821 int current = jvms()->monitor_depth()* C->sync_stack_slots();
3822 int next = current + C->sync_stack_slots();
3823 // Keep the toplevel high water mark current:
3824 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3825 return current;
3826 }
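// Illustrative example (the actual value of sync_stack_slots() is platform dependent): if
// sync_stack_slots() were 1, successive monitors would be numbered 0, 1, 2, ... and
// fixed_slots() would track the resulting high water mark.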
3827
3828 //------------------------------insert_mem_bar---------------------------------
3829 // Memory barrier to avoid floating things around
3830 // The membar serves as a pinch point between both control and all memory slices.
3831 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3832 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3833 mb->init_req(TypeFunc::Control, control());
3834 mb->init_req(TypeFunc::Memory, reset_memory());
3835 Node* membar = _gvn.transform(mb);
3863 }
3864 Node* membar = _gvn.transform(mb);
3865 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3866 if (alias_idx == Compile::AliasIdxBot) {
3867 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3868 } else {
3869 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3870 }
3871 return membar;
3872 }
3873
3874 //------------------------------shared_lock------------------------------------
3875 // Emit locking code.
3876 FastLockNode* GraphKit::shared_lock(Node* obj) {
3877 // bci is either a monitorenter bc or InvocationEntryBci
3878 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3879 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3880
3881 if( !GenerateSynchronizationCode )
3882 return nullptr; // Not locking things?
3883
3884 if (stopped()) // Dead monitor?
3885 return nullptr;
3886
3887 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3888
3889 // Box the stack location
3890 Node* box = new BoxLockNode(next_monitor());
3891 // Check for bailout after new BoxLockNode
3892 if (failing()) { return nullptr; }
3893 box = _gvn.transform(box);
3894 Node* mem = reset_memory();
3895
3896 FastLockNode * flock = _gvn.transform(new FastLockNode(nullptr, obj, box) )->as_FastLock();
3897
3898 // Add monitor to debug info for the slow path. If we block inside the
3899 // slow path and de-opt, we need the monitor hanging around
3900 map()->push_monitor( flock );
3901
3902 const TypeFunc *tf = LockNode::lock_type();
3903 LockNode *lock = new LockNode(C, tf);
3932 }
3933 #endif
3934
3935 return flock;
3936 }
3937
3938
3939 //------------------------------shared_unlock----------------------------------
3940 // Emit unlocking code.
3941 void GraphKit::shared_unlock(Node* box, Node* obj) {
3942   // bci is either a monitorexit bc or InvocationEntryBci
3943 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3944 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3945
3946 if( !GenerateSynchronizationCode )
3947 return;
3948 if (stopped()) { // Dead monitor?
3949 map()->pop_monitor(); // Kill monitor from debug info
3950 return;
3951 }
3952 assert(!obj->is_InlineType(), "should not unlock on inline type");
3953
3954 // Memory barrier to avoid floating things down past the locked region
3955 insert_mem_bar(Op_MemBarReleaseLock);
3956
3957 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3958 UnlockNode *unlock = new UnlockNode(C, tf);
3959 #ifdef ASSERT
3960 unlock->set_dbg_jvms(sync_jvms());
3961 #endif
3962 uint raw_idx = Compile::AliasIdxRaw;
3963 unlock->init_req( TypeFunc::Control, control() );
3964 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3965 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3966 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3967 unlock->init_req( TypeFunc::ReturnAdr, top() );
3968
3969 unlock->init_req(TypeFunc::Parms + 0, obj);
3970 unlock->init_req(TypeFunc::Parms + 1, box);
3971 unlock = _gvn.transform(unlock)->as_Unlock();
3972
3973 Node* mem = reset_memory();
3974
3975 // unlock has no side-effects, sets few values
3976 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3977
3978 // Kill monitor from debug info
3979 map()->pop_monitor( );
3980 }
3981
3982 //-------------------------------get_layout_helper-----------------------------
3983 // If the given klass is a constant or known to be an array,
3984 // fetch the constant layout helper value into constant_value
3985 // and return null. Otherwise, load the non-constant
3986 // layout helper value, and return the node which represents it.
3987 // This two-faced routine is useful because allocation sites
3988 // almost always feature constant types.
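// (Background; see klass.hpp for the authoritative encoding: for instance klasses the layout
// helper is a positive value encoding the instance size in bytes, with a low bit flagging
// slow-path allocation; for array klasses it is a negative, tagged value encoding header size
// and log2 element size.)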
3989 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3990 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3991 if (!StressReflectiveCode && klass_t != nullptr) {
3992 bool xklass = klass_t->klass_is_exact();
3993 bool can_be_flat = false;
3994 const TypeAryPtr* ary_type = klass_t->as_instance_type()->isa_aryptr();
3995 if (UseFlatArray && !xklass && ary_type != nullptr && !ary_type->is_null_free()) {
3996 // Don't constant fold if the runtime type might be a flat array but the static type is not.
3997 const TypeOopPtr* elem = ary_type->elem()->make_oopptr();
3998 can_be_flat = ary_type->can_be_inline_array() && (!elem->is_inlinetypeptr() || elem->inline_klass()->flat_in_array());
3999 }
4000 if (!can_be_flat && (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM))) {
4001 jint lhelper;
4002 if (klass_t->is_flat()) {
4003 lhelper = ary_type->flat_layout_helper();
4004 } else if (klass_t->isa_aryklassptr()) {
4005 BasicType elem = ary_type->elem()->array_element_basic_type();
4006 if (is_reference_type(elem, true)) {
4007 elem = T_OBJECT;
4008 }
4009 lhelper = Klass::array_layout_helper(elem);
4010 } else {
4011 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
4012 }
4013 if (lhelper != Klass::_lh_neutral_value) {
4014 constant_value = lhelper;
4015 return (Node*) nullptr;
4016 }
4017 }
4018 }
4019 constant_value = Klass::_lh_neutral_value; // put in a known value
4020 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
4021 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
4022 }
4023
4024 // We just put in an allocate/initialize with a big raw-memory effect.
4025 // Hook selected additional alias categories on the initialization.
4026 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
4027 MergeMemNode* init_in_merge,
4028 Node* init_out_raw) {
4029 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
4030 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
4031
4032 Node* prevmem = kit.memory(alias_idx);
4033 init_in_merge->set_memory_at(alias_idx, prevmem);
4034 if (init_out_raw != nullptr) {
4035 kit.set_memory(init_out_raw, alias_idx);
4036 }
4037 }
4038
4039 //---------------------------set_output_for_allocation-------------------------
4040 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
4041 const TypeOopPtr* oop_type,
4042 bool deoptimize_on_exception) {
4043 int rawidx = Compile::AliasIdxRaw;
4044 alloc->set_req( TypeFunc::FramePtr, frameptr() );
4045 add_safepoint_edges(alloc);
4046 Node* allocx = _gvn.transform(alloc);
4047 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
4048 // create memory projection for i_o
4049 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
4050 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
4051
4052 // create a memory projection as for the normal control path
4053 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
4054 set_memory(malloc, rawidx);
4055
4056   // a normal slow-call doesn't change i_o, but an allocation does, so
4057   // we create a separate i_o projection for the normal control path
4058 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
4059 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
4060
4061 // put in an initialization barrier
4062 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
4063 rawoop)->as_Initialize();
4064 assert(alloc->initialization() == init, "2-way macro link must work");
4065 assert(init ->allocation() == alloc, "2-way macro link must work");
4066 {
4067 // Extract memory strands which may participate in the new object's
4068 // initialization, and source them from the new InitializeNode.
4069 // This will allow us to observe initializations when they occur,
4070 // and link them properly (as a group) to the InitializeNode.
4071 assert(init->in(InitializeNode::Memory) == malloc, "");
4072 MergeMemNode* minit_in = MergeMemNode::make(malloc);
4073 init->set_req(InitializeNode::Memory, minit_in);
4074 record_for_igvn(minit_in); // fold it up later, if possible
4075 _gvn.set_type(minit_in, Type::MEMORY);
4076 Node* minit_out = memory(rawidx);
4077 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
4078 // Add an edge in the MergeMem for the header fields so an access
4079 // to one of those has correct memory state
4080 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
4081 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
4082 if (oop_type->isa_aryptr()) {
4083 const TypeAryPtr* arytype = oop_type->is_aryptr();
4084 if (arytype->is_flat()) {
4085 // Initially all flat array accesses share a single slice
4086 // but that changes after parsing. Prepare the memory graph so
4087 // it can optimize flat array accesses properly once they
4088 // don't share a single slice.
4089 assert(C->flat_accesses_share_alias(), "should be set at parse time");
4090 C->set_flat_accesses_share_alias(false);
4091 ciInlineKlass* vk = arytype->elem()->inline_klass();
4092 for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
4093 ciField* field = vk->nonstatic_field_at(i);
4094 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4095 continue; // do not bother to track really large numbers of fields
4096 int off_in_vt = field->offset_in_bytes() - vk->first_field_offset();
4097 const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
4098 int fieldidx = C->get_alias_index(adr_type, true);
4099         // Pass nullptr for init_out. Having per flat array element field memory edges as uses of the Initialize node
4100         // can result in per flat array field Phis being created, which confuses the logic of
4101         // Compile::adjust_flat_array_access_aliases().
4102 hook_memory_on_init(*this, fieldidx, minit_in, nullptr);
4103 }
4104 C->set_flat_accesses_share_alias(true);
4105 hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
4106 } else {
4107 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
4108 int elemidx = C->get_alias_index(telemref);
4109 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
4110 }
4111 } else if (oop_type->isa_instptr()) {
4112 set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
4113 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
4114 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
4115 ciField* field = ik->nonstatic_field_at(i);
4116 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4117 continue; // do not bother to track really large numbers of fields
4118 // Find (or create) the alias category for this field:
4119 int fieldidx = C->alias_type(field)->index();
4120 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
4121 }
4122 }
4123 }
4124
4125 // Cast raw oop to the real thing...
4126 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
4127 javaoop = _gvn.transform(javaoop);
4128 C->set_recent_alloc(control(), javaoop);
4129 assert(just_allocated_object(control()) == javaoop, "just allocated");
4130
4131 #ifdef ASSERT
4132 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
4143 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
4144 }
4145 }
4146 #endif //ASSERT
4147
4148 return javaoop;
4149 }
4150
4151 //---------------------------new_instance--------------------------------------
4152 // This routine takes a klass_node which may be constant (for a static type)
4153 // or may be non-constant (for reflective code). It will work equally well
4154 // for either, and the graph will fold nicely if the optimizer later reduces
4155 // the type to a constant.
4156 // The optional arguments are for specialized use by intrinsics:
4157 // - If 'extra_slow_test' is non-null, it is an extra condition for the slow-path.
4158 // - If 'return_size_val', report the total object size to the caller.
4159 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
4160 Node* GraphKit::new_instance(Node* klass_node,
4161 Node* extra_slow_test,
4162 Node* *return_size_val,
4163 bool deoptimize_on_exception,
4164 InlineTypeNode* inline_type_node) {
4165 // Compute size in doublewords
4166 // The size is always an integral number of doublewords, represented
4167 // as a positive bytewise size stored in the klass's layout_helper.
4168 // The layout_helper also encodes (in a low bit) the need for a slow path.
4169 jint layout_con = Klass::_lh_neutral_value;
4170 Node* layout_val = get_layout_helper(klass_node, layout_con);
4171 bool layout_is_con = (layout_val == nullptr);
4172
4173 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
4174 // Generate the initial go-slow test. It's either ALWAYS (return a
4175 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
4176 // case) a computed value derived from the layout_helper.
4177 Node* initial_slow_test = nullptr;
4178 if (layout_is_con) {
4179 assert(!StressReflectiveCode, "stress mode does not use these paths");
4180 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
4181 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
4182 } else { // reflective case
4183 // This reflective path is used by Unsafe.allocateInstance.
4184 // (It may be stress-tested by specifying StressReflectiveCode.)
4185 // Basically, we want to get into the VM if there's an illegal argument.
4186 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
4187 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
4188 if (extra_slow_test != intcon(0)) {
4189 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
4190 }
4191 // (Macro-expander will further convert this to a Bool, if necessary.)
4202
4203 // Clear the low bits to extract layout_helper_size_in_bytes:
4204 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
4205 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
4206 size = _gvn.transform( new AndXNode(size, mask) );
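  // (With LogBytesPerLong == 3 the mask is ~7: instance sizes are multiples of
  // 8 bytes, so this only strips the low tag bits, including the slow-path bit,
  // from the helper-derived size.)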
4207 }
4208 if (return_size_val != nullptr) {
4209 (*return_size_val) = size;
4210 }
4211
4212 // This is a precise notnull oop of the klass.
4213 // (Actually, it need not be precise if this is a reflective allocation.)
4214 // It's what we cast the result to.
4215 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
4216 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
4217 const TypeOopPtr* oop_type = tklass->as_instance_type();
4218
4219 // Now generate allocation code
4220
4221 // The entire memory state is needed for slow path of the allocation
4222 // since GC and deoptimization can happen.
4223 Node *mem = reset_memory();
4224 set_all_memory(mem); // Create new memory state
4225
4226 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
4227 control(), mem, i_o(),
4228 size, klass_node,
4229 initial_slow_test, inline_type_node);
4230
4231 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
4232 }
4233
4234 //-------------------------------new_array-------------------------------------
4235 // helper for newarray and anewarray
4236 // The 'length' parameter is (obviously) the length of the array.
4237 // The optional arguments are for specialized use by intrinsics:
4238 // - If 'return_size_val', report the non-padded array size (sum of header size
4239 // and array body) to the caller.
4240 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
4241 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
4242 Node* length, // number of array elements
4243 int nargs, // number of arguments to push back for uncommon trap
4244 Node* *return_size_val,
4245 bool deoptimize_on_exception) {
4246 jint layout_con = Klass::_lh_neutral_value;
4247 Node* layout_val = get_layout_helper(klass_node, layout_con);
4248 bool layout_is_con = (layout_val == nullptr);
4249
4250 if (!layout_is_con && !StressReflectiveCode &&
4251 !too_many_traps(Deoptimization::Reason_class_check)) {
4252 // This is a reflective array creation site.
4253 // Optimistically assume that it is a subtype of Object[],
4254 // so that we can fold up all the address arithmetic.
4255 layout_con = Klass::array_layout_helper(T_OBJECT);
4256 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
4257 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
4258 { BuildCutout unless(this, bol_lh, PROB_MAX);
4259 inc_sp(nargs);
4260 uncommon_trap(Deoptimization::Reason_class_check,
4261 Deoptimization::Action_maybe_recompile);
4262 }
4263 layout_val = nullptr;
4264 layout_is_con = true;
4265 }
4266
4267 // Generate the initial go-slow test. Make sure we do not overflow
4268 // if length is huge (near 2Gig) or negative! We do not need
4269 // exact double-words here, just a close approximation of needed
4270 // double-words. We can't add any offset or rounding bits, lest we
4271 // take a size of -1 bytes and make it positive. Use an unsigned
4272 // compare, so negative sizes look hugely positive.
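  // (A negative length such as -1 is seen as 0xFFFFFFFF by the unsigned compare
  // below and therefore always exceeds fast_size_limit, forcing the slow path.)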
4273 int fast_size_limit = FastAllocateSizeLimit;
4274 if (layout_is_con) {
4275 assert(!StressReflectiveCode, "stress mode does not use these paths");
4276 // Increase the size limit if we have exact knowledge of array type.
4277 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
4278 fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
4279 }
4280
4281 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
4282 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
4283
4284 // --- Size Computation ---
4285 // array_size = round_to_heap(array_header + (length << elem_shift));
4286 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
4287 // and align_to(x, y) == ((x + y-1) & ~(y-1))
4288 // The rounding mask is strength-reduced, if possible.
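  // For example, with the typical 8-byte alignment align_to(x, 8) == ((x + 7) & ~7),
  // so a 13-byte raw size rounds up to 16 bytes.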
4289 int round_mask = MinObjAlignmentInBytes - 1;
4290 Node* header_size = nullptr;
4291 // (T_BYTE has the weakest alignment and size restrictions...)
4292 if (layout_is_con) {
4293 int hsize = Klass::layout_helper_header_size(layout_con);
4294 int eshift = Klass::layout_helper_log2_element_size(layout_con);
4295 bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
4296 if ((round_mask & ~right_n_bits(eshift)) == 0)
4297 round_mask = 0; // strength-reduce it if it goes away completely
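    // (e.g. a long[] or double[] has 8-byte elements, so length << 3 is already
    // heap-aligned and the rounding step can be dropped entirely.)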
4298 assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
4299 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
4300 assert(header_size_min <= hsize, "generic minimum is smallest");
4301 header_size = intcon(hsize);
4302 } else {
4303 Node* hss = intcon(Klass::_lh_header_size_shift);
4304 Node* hsm = intcon(Klass::_lh_header_size_mask);
4305 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
4306 header_size = _gvn.transform(new AndINode(header_size, hsm));
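    // i.e. header_size = (layout_helper >> _lh_header_size_shift) & _lh_header_size_mask,
    // the same decoding Klass::layout_helper_header_size() performs on a constant helper.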
4307 }
4308
4309 Node* elem_shift = nullptr;
4310 if (layout_is_con) {
4311 int eshift = Klass::layout_helper_log2_element_size(layout_con);
4312 if (eshift != 0)
4313 elem_shift = intcon(eshift);
4314 } else {
4315 // There is no need to mask or shift this value.
4316 // The semantics of LShiftINode include an implicit mask to 0x1F.
4317 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
4318 elem_shift = layout_val;
4365 }
4366 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
4367
4368 if (return_size_val != nullptr) {
4369 // This is the size
4370 (*return_size_val) = non_rounded_size;
4371 }
4372
4373 Node* size = non_rounded_size;
4374 if (round_mask != 0) {
4375 Node* mask1 = MakeConX(round_mask);
4376 size = _gvn.transform(new AddXNode(size, mask1));
4377 Node* mask2 = MakeConX(~round_mask);
4378 size = _gvn.transform(new AndXNode(size, mask2));
4379 }
4380 // else if round_mask == 0, the size computation is self-rounding
4381
4382 // Now generate allocation code
4383
4384 // The entire memory state is needed for slow path of the allocation
4385 // since GC and deoptimization can happen.
4386 Node *mem = reset_memory();
4387 set_all_memory(mem); // Create new memory state
4388
4389 if (initial_slow_test->is_Bool()) {
4390 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
4391 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
4392 }
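  // as_int_value() materializes the condition as a 0/1 integer (via a CMove), so the
  // AllocateArrayNode carries a plain integer slow-path input instead of a Bool.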
4393
4394 const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
4395 const TypeOopPtr* ary_type = ary_klass->as_instance_type();
4396 const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
4397
4398 // Inline type array variants:
4399 // - null-ok: ciObjArrayKlass with is_elem_null_free() = false
4400 // - null-free: ciObjArrayKlass with is_elem_null_free() = true
4401 // - null-free, flat: ciFlatArrayKlass with is_elem_null_free() = true
4402 // Check if array is a null-free, non-flat inline type array
4403 // that needs to be initialized with the default inline type.
4404 Node* default_value = nullptr;
4405 Node* raw_default_value = nullptr;
4406 if (ary_ptr != nullptr && ary_ptr->klass_is_exact()) {
4407 // Array type is known
4408 if (ary_ptr->is_null_free() && !ary_ptr->is_flat()) {
4409 ciInlineKlass* vk = ary_ptr->elem()->inline_klass();
4410 default_value = InlineTypeNode::default_oop(gvn(), vk);
4411 if (UseCompressedOops) {
4412 // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
4413 default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
4414 Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
4415 Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
4416 raw_default_value = _gvn.transform(new OrLNode(lower, upper));
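        // The narrow oop now occupies both halves of the 64-bit word, presumably so
        // the array body can be filled with word-wide stores during macro expansion.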
4417 } else {
4418 raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
4419 }
4420 }
4421 }
4422
4423 Node* valid_length_test = _gvn.intcon(1);
4424 if (ary_type->isa_aryptr()) {
4425 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
4426 jint max = TypeAryPtr::max_array_length(bt);
4427 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
4428 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
4429 }
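  // valid_length_test holds when 0 <= length <= max_array_length(bt); the unsigned
  // compare folds the negative-length case into the "too large" one.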
4430
4431 // Create the AllocateArrayNode and its result projections
4432 AllocateArrayNode* alloc
4433 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
4434 control(), mem, i_o(),
4435 size, klass_node,
4436 initial_slow_test,
4437 length, valid_length_test,
4438 default_value, raw_default_value);
4439 // Cast to correct type. Note that the klass_node may be constant or not,
4440 // and in the latter case the actual array type will be inexact also.
4441 // (This happens via a non-constant argument to inline_native_newArray.)
4442 // In any case, the value of klass_node provides the desired array type.
4443 const TypeInt* length_type = _gvn.find_int_type(length);
4444 if (ary_type->isa_aryptr() && length_type != nullptr) {
4445 // Try to get a better type than POS for the size
4446 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4447 }
4448
4449 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
4450
4451 array_ideal_length(alloc, ary_type, true);
4452 return javaoop;
4453 }
4454
4455 // The following "Ideal_foo" functions are placed here because they recognize
4456 // the graph shapes created by the functions immediately above.
4457
4458 //---------------------------Ideal_allocation----------------------------------
4565 set_all_memory(ideal.merged_memory());
4566 set_i_o(ideal.i_o());
4567 set_control(ideal.ctrl());
4568 }
4569
4570 void GraphKit::final_sync(IdealKit& ideal) {
4571 // Final sync IdealKit and graphKit.
4572 sync_kit(ideal);
4573 }
4574
4575 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4576 Node* len = load_array_length(load_String_value(str, set_ctrl));
4577 Node* coder = load_String_coder(str, set_ctrl);
4578 // Divide length by 2 if coder is UTF16
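  // (coder is 0 for LATIN1 and 1 for UTF16, so shifting the byte length of the
  // value array right by 'coder' yields the character count in either case.)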
4579 return _gvn.transform(new RShiftINode(len, coder));
4580 }
4581
4582 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4583 int value_offset = java_lang_String::value_offset();
4584 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4585 false, nullptr, Type::Offset(0));
4586 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4587 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4588 TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, false, true, true),
4589 ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
4590 Node* p = basic_plus_adr(str, str, value_offset);
4591 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4592 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4593 return load;
4594 }
4595
4596 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4597 if (!CompactStrings) {
4598 return intcon(java_lang_String::CODER_UTF16);
4599 }
4600 int coder_offset = java_lang_String::coder_offset();
4601 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4602 false, nullptr, Type::Offset(0));
4603 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4604
4605 Node* p = basic_plus_adr(str, str, coder_offset);
4606 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4607 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4608 return load;
4609 }
4610
4611 void GraphKit::store_String_value(Node* str, Node* value) {
4612 int value_offset = java_lang_String::value_offset();
4613 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4614 false, nullptr, Type::Offset(0));
4615 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4616
4617 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4618 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4619 }
4620
4621 void GraphKit::store_String_coder(Node* str, Node* value) {
4622 int coder_offset = java_lang_String::coder_offset();
4623 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4624 false, nullptr, Type::Offset(0));
4625 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4626
4627 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4628 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4629 }
4630
4631 // Capture src and dst memory state with a MergeMemNode
4632 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4633 if (src_type == dst_type) {
4634 // Types are equal, we don't need a MergeMemNode
4635 return memory(src_type);
4636 }
4637 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4638 record_for_igvn(merge); // fold it up later, if possible
4639 int src_idx = C->get_alias_index(src_type);
4640 int dst_idx = C->get_alias_index(dst_type);
4641 merge->set_memory_at(src_idx, memory(src_idx));
4642 merge->set_memory_at(dst_idx, memory(dst_idx));
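  // Slices other than src and dst fall through to the base memory captured above.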
4643 return merge;
4644 }
4717 i_char->init_req(2, AddI(i_char, intcon(2)));
4718
4719 set_control(IfFalse(iff));
4720 set_memory(st, TypeAryPtr::BYTES);
4721 }
4722
4723 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4724 if (!field->is_constant()) {
4725 return nullptr; // Field not marked as constant.
4726 }
4727 ciInstance* holder = nullptr;
4728 if (!field->is_static()) {
4729 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4730 if (const_oop != nullptr && const_oop->is_instance()) {
4731 holder = const_oop->as_instance();
4732 }
4733 }
4734 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4735 /*is_unsigned_load=*/false);
4736 if (con_type != nullptr) {
4737 Node* con = makecon(con_type);
4738 if (field->type()->is_inlinetype()) {
4739 con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
4740 } else if (con_type->is_inlinetypeptr()) {
4741 con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
4742 }
4743 return con;
4744 }
4745 return nullptr;
4746 }
4747
4748 //---------------------------load_mirror_from_klass----------------------------
4749 // Given a klass oop, load its java mirror (a java.lang.Class oop).
4750 Node* GraphKit::load_mirror_from_klass(Node* klass) {
4751 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
4752 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4753 // mirror = ((OopHandle)mirror)->resolve();
4754 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
4755 }
4756
4757 Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
4758 const Type* obj_type = obj->bottom_type();
4759 const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
4760 if (obj_type->isa_oopptr() && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
4761 const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
4762 Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
4763 obj = casted_obj;
4764 }
4765 if (sig_type->is_inlinetypeptr()) {
4766 obj = InlineTypeNode::make_from_oop(this, obj, sig_type->inline_klass(), !gvn().type(obj)->maybe_null());
4767 }
4768 return obj;
4769 }
|