 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/register.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/powerOfTwo.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != nullptr)  jvms->map()->set_next_exception(nullptr);
  set_jvms(jvms);
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = nullptr;
  set_map(nullptr);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}


//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int       cur_bci    = jvms->bci();
  if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && code == Bytecodes::_multianewarray);
    // Reexecute _multianewarray bytecode which was replaced with
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: the interpreter should not have it set, since this optimization
    // is limited by dimensions and guarded by a flag; so in some cases
    // multianewarray() runtime calls will be generated and
    // the bytecode should not be reexecuted (stack will not be reset).
  } else {
    return false;
  }
}
873
874 // Helper function for adding JVMState and debug information to node
875 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
876 // Add the safepoint edges to the call (or other safepoint).
877
878 // Make sure dead locals are set to top. This
879 // should help register allocation time and cut down on the size
880 // of the deoptimization information.
881 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
932 }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.
  for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
    uint debug_end   = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l)  s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
  Node* conv = _gvn.transform( new ConvI2LNode(offset));
  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
  return _gvn.transform( new AndLNode(conv, mask) );
}
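// The AndL with max_juint above zero-extends the 32-bit offset: for example,
// an int offset of -1 (0xFFFFFFFF) becomes the positive long 4294967295,
// so the offset is treated as unsigned rather than sign-extended.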

Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new ConvL2INode(offset));
}

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != nullptr)  return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS));
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
  Node *alen;
  if (alloc == nullptr) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new LoadRangeNode(nullptr, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
  }
  return alen;
}

Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
                                   const TypeOopPtr* oop_type,
                                   bool replace_length_in_map) {
  Node* length = alloc->Ideal_length();
      replace_in_map(length, ccast);
    }
    return ccast;
  }
  return length;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a null pointer check. Returned value is
// the incoming address with null casted away. You are allowed to use the
// not-null value only if you are control dependent on the test.
#ifndef PRODUCT
extern uint explicit_null_checks_inserted,
            explicit_null_checks_elided;
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control,
                                  bool speculative) {
  assert(!assert_null || null_control == nullptr, "not both at once");
  if (stopped())  return top();
  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Construct null check
  Node *chk = nullptr;
  switch(type) {
    case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );

      const TypeOopPtr* tp = t->isa_oopptr();
      if (tp != nullptr && !tp->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == nullptr) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded. However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        NOT_PRODUCT(explicit_null_checks_elided++);
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == nullptr)  break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate: nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null) {
    reason = Deoptimization::reason_null_assert(speculative);
  } else if (type == T_OBJECT) {
    reason = Deoptimization::reason_null_check(speculative);
  } else {
    reason = Deoptimization::Reason_div0_check;
  }
  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed. This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX). For an explicit check the probability
  // is set to a smaller value.
  if (null_control != nullptr || too_many_traps(reason)) {
    // probability is less likely
    ok_prob = PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != nullptr &&
             (method()->method_data()->trap_count(reason)
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == nullptr || (*null_control) == top())
      replace_in_map(value, cast);
    value = cast;
  }

  return value;
}


//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  const Type *t = _gvn.type(obj);
  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if( t == t_not_null )  return obj;

  Node* cast = new CastPPNode(control(), obj, t_not_null);
  cast = _gvn.transform( cast );

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
  if (do_replace_in_map)
    replace_in_map(obj, cast);

  return cast;                  // Return casted value
}

// Sometimes in intrinsics, we implicitly know an object is not null
// (there's no actual null check) so we can cast it to not null. In
// the course of optimizations, the input to the cast can become null.
// In that case that data path will die and we need the control path
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//

// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                          MemNode::MemOrd mo,
                          LoadNode::ControlDependency control_dependency,
                          bool require_atomic_access,
                          bool unaligned,
                          bool mismatched,
                          bool unsafe,
                          uint8_t barrier_data) {
  int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  const TypePtr* adr_type = nullptr; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
  ld = _gvn.transform(ld);
  if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
    // Improve graph before escape analysis and boxing elimination.
    record_for_igvn(ld);
    if (ld->is_DecodeN()) {
      // Also record the actual load (LoadN) in case ld is DecodeN. In some
      // rare corner cases, ld->in(1) can be something other than LoadN (e.g.,
      // a Phi). Recording such cases is still perfectly sound, but may be
      // unnecessary and result in some minor IGVN overhead.
      record_for_igvn(ld->in(1));
    }
  }
  return ld;
}
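// Usage sketch (illustrative only, relying on the default values of the
// trailing parameters declared in graphKit.hpp; 'field_offset' is a
// hypothetical constant byte offset):
//
//   Node* adr = basic_plus_adr(obj, field_offset);
//   Node* val = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
//
// increment_counter() below is an in-tree example of the same pattern for a
// raw T_LONG counter.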

Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                MemNode::MemOrd mo,
                                bool require_atomic_access,
                                bool unaligned,
                                bool mismatched,
                                bool unsafe,
  if (unsafe) {
    st->as_Store()->set_unsafe_access();
  }
  st->as_Store()->set_barrier_data(barrier_data);
  st = _gvn.transform(st);
  set_memory(st, adr_idx);
  // Back-to-back stores can only remove intermediate store with DU info
  // so push on worklist for optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}

Node* GraphKit::access_store_at(Node* obj,
                                Node* adr,
                                const TypePtr* adr_type,
                                Node* val,
                                const Type* val_type,
                                BasicType bt,
                                DecoratorSet decorators) {
  // Transformation of a value which could be a null pointer (CastPP #null)
  // could be delayed during Parse (for example, in adjust_map_after_if()).
  // Execute transformation here to avoid barrier generation in such case.
  if (_gvn.type(val) == TypePtr::NULL_PTR) {
    val = _gvn.makecon(TypePtr::NULL_PTR);
  }

  if (stopped()) {
    return top(); // Dead path ?
  }

  assert(val != nullptr, "not dead path");

  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, val_type);
  C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::store_at(access, value);
  } else {
    return _barrier_set->store_at(access, value);
  }
}

Node* GraphKit::access_load_at(Node* obj,   // containing obj
                               Node* adr,   // actual address to load val from
                               const TypePtr* adr_type,
                               const Type* val_type,
                               BasicType bt,
                               DecoratorSet decorators) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr_type);
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
    return _barrier_set->load_at(access, val_type);
  }
}
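// For a typical in-heap use of access_load_at, see load_array_element below,
// which passes the IN_HEAP | IS_ARRAY decorators and lets the barrier set
// expand whatever GC load barriers the access requires.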

Node* GraphKit::access_load(Node* adr,   // actual address to load val at
                            const Type* val_type,
                            BasicType bt,
                            DecoratorSet decorators) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_add_at(access, new_val, value_type);
  }
}

void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
  return _barrier_set->clone(this, src, dst, size, is_array);
}

//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype, Node* ctrl) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);

  // short-circuit a common case (saves lots of confusing waste motion)
  jint idx_con = find_int_con(idx, -1);
  if (idx_con >= 0) {
    intptr_t offset = header + ((intptr_t)idx_con << shift);
    return basic_plus_adr(ary, offset);
  }

  // must be correct type for alignment purposes
  Node* base  = basic_plus_adr(ary, header);
  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
  Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
}
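// Worked example (the header size is platform-dependent; the numbers below
// only illustrate the arithmetic): for a T_INT array with a 16-byte header,
// shift == 2, so the constant-index case computes &a[7] as
// ary + 16 + (7 << 2) == ary + 44. The variable-index case produces the
// same sum from (base, idx << shift).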

//-------------------------load_array_element-------------------------
Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
  const Type* elemtype = arytype->elem();
  BasicType elembt = elemtype->array_element_basic_type();
  Node* adr = array_element_address(ary, idx, elembt, arytype->size());
  if (elembt == T_NARROWOOP) {
    elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
  }
  Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
                            IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
  return ld;
}

//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
  // Add the call arguments:
  uint nargs = call->method()->arg_size();
  for (uint i = 0; i < nargs; i++) {
    Node* arg = argument(i);
    call->init_req(i + TypeFunc::Parms, arg);
  }
}

//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
// A return value node (if any) is returned from set_results_for_java_call.
void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {

  // Add the predefined inputs:
  call->init_req( TypeFunc::Control,   control() );
  call->init_req( TypeFunc::I_O,       i_o() );
  call->init_req( TypeFunc::Memory,    reset_memory() );
  call->init_req( TypeFunc::FramePtr,  frameptr() );
  call->init_req( TypeFunc::ReturnAdr, top() );

  add_safepoint_edges(call, must_throw);

  Node* xcall = _gvn.transform(call);

  if (xcall == top()) {
    set_control(top());
    return;
  }
  assert(xcall == call, "call identity is stable");

  // Re-use the current map to produce the result.

  set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
  set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O, separate_io_proj)));
  set_all_memory_call(xcall, separate_io_proj);

  //return xcall;   // no need, caller already has it
}

Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
  if (stopped())  return top();  // maybe the call folded up?

  // Capture the return value, if any.
  Node* ret;
  if (call->method() == nullptr ||
      call->method()->return_type()->basic_type() == T_VOID)
    ret = top();
  else  ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));

  // Note: Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
    set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
    set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
  }
  return ret;
}
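// Typical call-emission sequence (a sketch of how the three helpers above
// compose; the call generators in callGenerator.cpp are the canonical users):
//
//   CallJavaNode* call = ...;               // e.g. a new CallStaticJavaNode
//   set_arguments_for_java_call(call);      // copy args from the JVMS
//   set_edges_for_java_call(call, false);   // wire ctl/io/mem + debug info
//   Node* ret = set_results_for_java_call(call);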

//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
// The real problem is that I am not doing real Type analysis on memory,
// so I cannot distinguish card mark stores from other stores. Across a GC
// point the Store Barrier and the card mark memory have to agree. I cannot
// have a card mark store and its barrier split across the GC point from
// either above or below. Here I get that to happen by reading ALL of memory.
// A better answer would be to separate out card marks from other memory.
// For now, return the input memory state, so that it can be reused
// after the call, if this call has restricted memory effects.
Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
  // Set fixed predefined input arguments
  Node* memory = reset_memory();
  Node* m = narrow_mem == nullptr ? memory : narrow_mem;
  call->init_req( TypeFunc::Control, control() );
  call->init_req( TypeFunc::I_O,     top() );  // does no i/o
  call->init_req( TypeFunc::Memory,  m );      // may gc ptrs
    if (use->is_MergeMem()) {
      wl.push(use);
    }
  }
}

// Replace the call with the current state of the kit.
void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes, bool do_asserts) {
  JVMState* ejvms = nullptr;
  if (has_exceptions()) {
    ejvms = transfer_exceptions_into_jvms();
  }

  ReplacedNodes replaced_nodes = map()->replaced_nodes();
  ReplacedNodes replaced_nodes_exception;
  Node* ex_ctl = top();

  SafePointNode* final_state = stop();

  // Find all the needed outputs of this call
  CallProjections callprojs;
  call->extract_projections(&callprojs, true, do_asserts);

  Unique_Node_List wl;
  Node* init_mem  = call->in(TypeFunc::Memory);
  Node* final_mem = final_state->in(TypeFunc::Memory);
  Node* final_ctl = final_state->in(TypeFunc::Control);
  Node* final_io  = final_state->in(TypeFunc::I_O);

  // Replace all the old call edges with the edges from the inlining result
  if (callprojs.fallthrough_catchproj != nullptr) {
    C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
  }
  if (callprojs.fallthrough_memproj != nullptr) {
    if (final_mem->is_MergeMem()) {
      // The parser's exit MergeMem was not transformed, but it may be optimized
      final_mem = _gvn.transform(final_mem);
    }
    C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
    add_mergemem_users_to_worklist(wl, final_mem);
  }
  if (callprojs.fallthrough_ioproj != nullptr) {
    C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
  }

  // Replace the result with the new result if it exists and is used
  if (callprojs.resproj != nullptr && result != nullptr) {
    C->gvn_replace_by(callprojs.resproj, result);
  }

  if (ejvms == nullptr) {
    // No exception edges, so simply kill off those paths
    if (callprojs.catchall_catchproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
    }
    if (callprojs.catchall_memproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_memproj, C->top());
    }
    if (callprojs.catchall_ioproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
    }
    // Replace the old exception object with top
    if (callprojs.exobj != nullptr) {
      C->gvn_replace_by(callprojs.exobj, C->top());
    }
  } else {
    GraphKit ekit(ejvms);

    // Load my combined exception state into the kit, with all phis transformed:
    SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
    replaced_nodes_exception = ex_map->replaced_nodes();

    Node* ex_oop = ekit.use_exception_state(ex_map);

    if (callprojs.catchall_catchproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
      ex_ctl = ekit.control();
    }
    if (callprojs.catchall_memproj != nullptr) {
      Node* ex_mem = ekit.reset_memory();
      C->gvn_replace_by(callprojs.catchall_memproj, ex_mem);
      add_mergemem_users_to_worklist(wl, ex_mem);
    }
    if (callprojs.catchall_ioproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
    }

    // Replace the old exception object with the newly created one
    if (callprojs.exobj != nullptr) {
      C->gvn_replace_by(callprojs.exobj, ex_oop);
    }
  }

  // Disconnect the call from the graph
  call->disconnect_inputs(C);
  C->gvn_replace_by(call, C->top());

  // Clean up any MergeMems that feed other MergeMems since the
  // optimizer doesn't like that.
  while (wl.size() > 0) {
    _gvn.transform(wl.pop());
  }

  if (callprojs.fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes.apply(C, final_ctl);
  }
  if (!ex_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes_exception.apply(C, ex_ctl);
  }
}


//------------------------------increment_counter------------------------------
// for statistics: increment a VM counter by 1

void GraphKit::increment_counter(address counter_addr) {
  Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
  increment_counter(adr1);
}

void GraphKit::increment_counter(Node* counter_addr) {
  Node* ctrl = control();
  Node* cnt  = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, MemNode::unordered);
  Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
 *
 * @param n          node that the type applies to
 * @param exact_kls  type from profiling
 * @param maybe_null did profiling see null?
 *
 * @return           node with improved type
 */
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
  const Type* current_type = _gvn.type(n);
  assert(UseTypeSpeculation, "type speculation must be on");

  const TypePtr* speculative = current_type->speculative();

  // Should the klass from the profile be recorded in the speculative type?
  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
    const TypeOopPtr* xtype = tklass->as_instance_type();
    assert(xtype->klass_is_exact(), "Should be exact");
    // Any reason to believe n is not null (from this profiling or a previous one)?
    assert(ptr_kind != ProfileAlwaysNull, "impossible here");
    const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
    // record the new speculative type's depth
    speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
    speculative = speculative->with_inline_depth(jvms()->depth());
  } else if (current_type->would_improve_ptr(ptr_kind)) {
    // Profiling reports that null was never seen, so we can change the
    // speculative type to a non-null ptr.
    if (ptr_kind == ProfileAlwaysNull) {
      speculative = TypePtr::NULL_PTR;
    } else {
      assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
      const TypePtr* ptr = TypePtr::NOTNULL;
      if (speculative != nullptr) {
        speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
      } else {
        speculative = ptr;
      }
    }
  }

  if (speculative != current_type->speculative()) {
    // Build a type with a speculative type (what we think we know
    // about the type but will need a guard when we use it)
    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
    // We're changing the type, we need a new CheckCast node to carry
    // the new type. The new type depends on the control: what
    // profiling tells us is only valid from here as far as we can
    // tell.
    Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
    cast = _gvn.transform(cast);
    replace_in_map(n, cast);
    n = cast;
  }

  return n;
}

/**
 * Record profiling data from receiver profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param n  receiver node
 *
 * @return   node with improved type
 */
Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
  if (!UseTypeSpeculation) {
    return n;
  }
  ciKlass* exact_kls = profile_has_unique_klass();
  ProfilePtrKind ptr_kind = ProfileMaybeNull;
  if ((java_bc() == Bytecodes::_checkcast ||
       java_bc() == Bytecodes::_instanceof ||
       java_bc() == Bytecodes::_aastore) &&
      method()->method_data()->is_mature()) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    if (data != nullptr) {
      if (!data->as_BitData()->null_seen()) {
        ptr_kind = ProfileNeverNull;
      } else {
        assert(data->is_ReceiverTypeData(), "bad profile data type");
        ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
        uint i = 0;
        for (; i < call->row_limit(); i++) {
          ciKlass* receiver = call->receiver(i);
          if (receiver != nullptr) {
            break;
          }
        }
        ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
      }
    }
  }
  return record_profile_for_speculation(n, exact_kls, ptr_kind);
}

/**
 * Record profiling data from argument profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param dest_method  target method for the call
 * @param bc           what invoke bytecode is this?
 */
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
  if (!UseTypeSpeculation) {
    return;
  }
  const TypeFunc* tf    = TypeFunc::make(dest_method);
  int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
  int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
  for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
    const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
    if (is_reference_type(targ->basic_type())) {
      ProfilePtrKind ptr_kind = ProfileMaybeNull;
      ciKlass* better_type = nullptr;
      if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
        record_profile_for_speculation(argument(j), better_type, ptr_kind);
      }
      i++;
    }
  }
}

/**
 * Record profiling data from parameter profiling at an invoke with
 * the type system so that it can propagate it (speculation)
 */
void GraphKit::record_profiled_parameters_for_speculation() {
  if (!UseTypeSpeculation) {
    return;
  }
  for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
 * the type system so that it can propagate it (speculation)
 */
void GraphKit::record_profiled_return_for_speculation() {
  if (!UseTypeSpeculation) {
    return;
  }
  ProfilePtrKind ptr_kind = ProfileMaybeNull;
  ciKlass* better_type = nullptr;
  if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
    // If profiling reports a single type for the return value,
    // feed it to the type system so it can propagate it as a
    // speculative type
    record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
  }
}

void GraphKit::round_double_arguments(ciMethod* dest_method) {
  if (Matcher::strict_fp_requires_explicit_rounding) {
    // (Note: TypeFunc::make has a cache that makes this fast.)
    const TypeFunc* tf    = TypeFunc::make(dest_method);
    int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
    for (int j = 0; j < nargs; j++) {
      const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
      if (targ->basic_type() == T_DOUBLE) {
        // If any parameters are doubles, they must be rounded before
        // the call; dprecision_rounding does the gvn.transform.
        Node *arg = argument(j);
        arg = dprecision_rounding(arg);
        set_argument(j, arg);
      }
    }
  }
}

// rounding for strict float precision conformance
Node* GraphKit::precision_rounding(Node* n) {
  if (Matcher::strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE == 0) {
      return _gvn.transform(new RoundFloatNode(nullptr, n));
    }
#else
    Unimplemented();
                                  // The first null ends the list.
                                  Node* parm0, Node* parm1,
                                  Node* parm2, Node* parm3,
                                  Node* parm4, Node* parm5,
                                  Node* parm6, Node* parm7) {
  assert(call_addr != nullptr, "must not call null targets");

  // Slow-path call
  bool is_leaf = !(flags & RC_NO_LEAF);
  bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
  if (call_name == nullptr) {
    assert(!is_leaf, "must supply name for leaf");
    call_name = OptoRuntime::stub_name(call_addr);
  }
  CallNode* call;
  if (!is_leaf) {
    call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
  } else if (flags & RC_NO_FP) {
    call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  } else if (flags & RC_VECTOR) {
    uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
    call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
  } else {
    call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
  }

  // The following is similar to set_edges_for_java_call,
  // except that the memory effects of the call are restricted to AliasIdxRaw.

  // Slow path call has no side-effects, uses few values
  bool wide_in  = !(flags & RC_NARROW_MEM);
  bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);

  Node* prev_mem = nullptr;
  if (wide_in) {
    prev_mem = set_predefined_input_for_runtime_call(call);
  } else {
    assert(!wide_out, "narrow in => narrow out");
    Node* narrow_mem = memory(adr_type);
    prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
  }

  if (has_io) {
    set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
  }
  return call;
}

// i2b
Node* GraphKit::sign_extend_byte(Node* in) {
  Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
  return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
}

// i2s
Node* GraphKit::sign_extend_short(Node* in) {
  Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
  return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
}
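// Example of the shift pair at work: sign_extend_byte(0x000000FF) computes
// (0xFF << 24) == 0xFF000000, then an arithmetic >> 24 yields 0xFFFFFFFF,
// i.e. (int)-1, matching the i2b bytecode; sign_extend_short(0x0000FFFF)
// likewise yields -1 via the 16-bit shift pair.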

//------------------------------merge_memory-----------------------------------
// Merge memory from one path into the current memory state.
void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
  for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
    Node* old_slice = mms.force_memory();
    Node* new_slice = mms.memory2();
    if (old_slice != new_slice) {
      PhiNode* phi;
      if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
        if (mms.is_empty()) {
          // clone base memory Phi's inputs for this memory slice
          assert(old_slice == mms.base_memory(), "sanity");
          phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
          _gvn.set_type(phi, Type::MEMORY);
          for (uint i = 1; i < phi->req(); i++) {
            phi->init_req(i, old_slice->in(i));
          }
        } else {
          phi = old_slice->as_Phi(); // Phi was generated already
        }

  // Now do a linear scan of the secondary super-klass array. Again, no real
  // performance impact (too rare) but it's gotta be done.
  // Since the code is rarely used, there is no penalty for moving it
  // out of line, and it can only improve I-cache density.
  // The decision to inline or out-of-line this final check is platform
  // dependent, and is found in the AD file definition of PartialSubtypeCheck.
  Node* psc = gvn.transform(
    new PartialSubtypeCheckNode(*ctrl, subklass, superklass));

  IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
  r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
  r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));

  // Return false path; set default control to true path.
  *ctrl = gvn.transform(r_ok_subtype);
  return gvn.transform(r_not_subtype);
}

Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
  bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
  if (expand_subtype_check) {
    MergeMemNode* mem = merged_memory();
    Node* ctrl = control();
    Node* subklass = obj_or_subklass;
    if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
      subklass = load_object_klass(obj_or_subklass);
    }

    Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
    set_control(ctrl);
    return n;
  }

  Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
  Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
  IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
  set_control(_gvn.transform(new IfTrueNode(iff)));
  return _gvn.transform(new IfFalseNode(iff));
}
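// Design note: before the post-loop-opts phase the check is emitted as an
// opaque SubTypeCheckNode, so loop optimizations can move or eliminate it as
// a single unit; once macro expansion is over, the full control-flow diamond
// from Phase::gen_subtype_check must be built directly, since no later pass
// would expand the macro node.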

// Profile-driven exact type check:
Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
                                    float prob,
                                    Node* *casted_receiver) {
  assert(!klass->is_interface(), "no exact type check on interfaces");

  const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
  Node* recv_klass = load_object_klass(receiver);
  Node* want_klass = makecon(tklass);
  Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
  IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
  set_control( _gvn.transform(new IfTrueNode (iff)));
  Node* fail = _gvn.transform(new IfFalseNode(iff));

  if (!stopped()) {
    const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
    const TypeOopPtr* recvx_type = tklass->as_instance_type();
    assert(recvx_type->klass_is_exact(), "");

    if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
      // Subsume downstream occurrences of receiver with a cast to
      // recvx_type, since now we know what the type will be.
      Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
      (*casted_receiver) = _gvn.transform(cast);
      assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
      // (User must make the replace_in_map call.)
    }
  }

  return fail;
}

//------------------------------subtype_check_receiver-------------------------
Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
                                       Node** casted_receiver) {
  const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
  Node* want_klass = makecon(tklass);

  Node* slow_ctl = gen_subtype_check(receiver, want_klass);

  // Ignore interface type information until interface types are properly tracked.
  if (!stopped() && !klass->is_interface()) {
    const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
    const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
    if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
      Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
      (*casted_receiver) = _gvn.transform(cast);
    }
  }

  return slow_ctl;
}

//------------------------------seems_never_null-------------------------------
// Use null_seen information if it is available from the profile.
// If we see an unexpected null at a type check we record it and force a
// recompile; the offending check will be recompiled to handle nulls.
// If we see several offending BCIs, then all checks in the
// method will be recompiled.
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
  speculating = !_gvn.type(obj)->speculative_maybe_null();
  Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
  if (UncommonNullCast               // Cutout for this technique
      && obj != null()               // And not the -Xcomp stupid case?
      && !too_many_traps(reason)
      ) {
    if (speculating) {
3062 //------------------------maybe_cast_profiled_receiver-------------------------
3063 // If the profile has seen exactly one type, narrow to exactly that type.
3064 // Subsequent type checks will always fold up.
3065 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3066 const TypeKlassPtr* require_klass,
3067 ciKlass* spec_klass,
3068 bool safe_for_replace) {
3069 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3070
3071 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3072
3073 // Make sure we haven't already deoptimized from this tactic.
3074 if (too_many_traps_or_recompiles(reason))
3075 return nullptr;
3076
3077 // (No, this isn't a call, but it's enough like a virtual call
3078 // to use the same ciMethod accessor to get the profile info...)
3079 // If we have a speculative type use it instead of profiling (which
3080 // may not help us)
3081 ciKlass* exact_kls = spec_klass == nullptr ? profile_has_unique_klass() : spec_klass;
3082 if (exact_kls != nullptr) {// no cast failures here
3083 if (require_klass == nullptr ||
3084 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3085 // If we narrow the type to match what the type profile sees or
3086 // the speculative type, we can then remove the rest of the
3087 // cast.
3088 // This is a win, even if the exact_kls is very specific,
3089 // because downstream operations, such as method calls,
3090 // will often benefit from the sharper type.
3091 Node* exact_obj = not_null_obj; // will get updated in place...
3092 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3093 &exact_obj);
3094 { PreserveJVMState pjvms(this);
3095 set_control(slow_ctl);
3096 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3097 }
3098 if (safe_for_replace) {
3099 replace_in_map(not_null_obj, exact_obj);
3100 }
3101 return exact_obj;
  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing instance-of on a null?
    set_control(null_ctl);
    return intcon(0);
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, intcon(0)); // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  // Do we know the type check always succeeds?
  bool known_statically = false;
  if (_gvn.type(superklass)->singleton()) {
    const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
    const TypeKlassPtr* subk   = _gvn.type(obj)->is_oopptr()->as_klass_type();
    if (subk->is_loaded()) {
      int static_res = C->static_subtype_check(superk, subk);
      known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
    }
  }

  if (!known_statically) {
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we
    // have a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
      Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
      if (stopped()) {            // Profile disagrees with this path.
        set_control(null_ctl);    // Null is the only remaining possibility.
        return intcon(0);
      }
      if (cast_obj != nullptr) {
        not_null_obj = cast_obj;
      }
    }
  record_for_igvn(region);

  // If we know the type check always succeeds then we don't use the
  // profiling data at this bytecode. Don't lose it, feed it to the
  // type system as a speculative type.
  if (safe_for_replace) {
    Node* casted_obj = record_profiled_receiver_for_speculation(obj);
    replace_in_map(obj, casted_obj);
  }

  return _gvn.transform(phi);
}

//-------------------------------gen_checkcast---------------------------------
// Generate a checkcast idiom. Used by both the checkcast bytecode and the
// array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work. Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure. Otherwise, an appropriate
// uncommon trap or exception is thrown.
Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
                              Node* *failure_control) {
  kill_dead_locals();           // Benefit all the uncommon traps
  const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
  const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
  const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();

  // Fast cutout: Check the case that the cast is vacuously true.
  // This detects the common cases where the test will short-circuit
  // away completely. We do this before we perform the null check,
  // because if the test is going to turn into zero code, we don't
  // want a residual null check left around. (Causes a slowdown,
  // for example, in some objArray manipulations, such as a[i]=a[j].)
  if (improved_klass_ptr_type->singleton()) {
    const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
    if (objtp != nullptr) {
      switch (C->static_subtype_check(improved_klass_ptr_type, objtp->as_klass_type())) {
      case Compile::SSC_always_true:
        // If we know the type check always succeeds then we don't use
        // the profiling data at this bytecode. Don't lose it, feed it
        // to the type system as a speculative type.
        return record_profiled_receiver_for_speculation(obj);
      case Compile::SSC_always_false:
        // It needs a null check because a null will *pass* the cast check.
        // A non-null value will always produce an exception.
        if (!objtp->maybe_null()) {
          bool is_aastore = (java_bc() == Bytecodes::_aastore);
          Deoptimization::DeoptReason reason = is_aastore ?
            Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
          builtin_throw(reason);
          return top();
        } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
          return null_assert(obj);
        }
        break; // Fall through to full check
      default:
        break;
      }
    }
  }

  ciProfileData* data = nullptr;
  bool safe_for_replace = false;
  if (failure_control == nullptr) {        // use MDO in regular case only
    assert(java_bc() == Bytecodes::_aastore ||
           java_bc() == Bytecodes::_checkcast,
           "interpreter profiles type checks only for these BCs");
    data = method()->method_data()->bci_to_data(bci());
    safe_for_replace = true;
  }

  // Make the merge point
  enum { _obj_path = 1, _null_path, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  Node*       phi    = new PhiNode(region, toop);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  // Use null-cast information if it is available
  bool speculative_not_null = false;
  bool never_see_null = ((failure_control == nullptr)  // regular case only
                         && seems_never_null(obj, data, speculative_not_null));

  // Null check; get casted pointer; set region slot 3
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing instance-of on a null?
    set_control(null_ctl);
    return null();
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, null());  // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  Node* cast_obj = nullptr;
  if (improved_klass_ptr_type->klass_is_exact()) {
    // The following optimization tries to statically cast the speculative type of the object
    // (for example obtained during profiling) to the type of the superklass and then do a
    // dynamic check that the type of the object is what we expect. To work correctly
    // for checkcast and aastore the type of superklass should be exact.
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we have
    // a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != nullptr || data != nullptr) {
      cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace);
      if (cast_obj != nullptr) {
        if (failure_control != nullptr) // failure is now impossible
          (*failure_control) = top();
        // adjust the type of the phi to the exact klass:
        phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
      }
    }
  }

  if (cast_obj == nullptr) {
    // Generate the subtype check
    Node* improved_superklass = superklass;
    if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
      improved_superklass = makecon(improved_klass_ptr_type);
    }
    Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);

    // Plug in success path into the merge
    cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
    // Failure path ends in uncommon trap (or may be dead - failure impossible)
    if (failure_control == nullptr) {
      if (not_subtype_ctrl != top()) { // If failure is possible
        PreserveJVMState pjvms(this);
        set_control(not_subtype_ctrl);
        bool is_aastore = (java_bc() == Bytecodes::_aastore);
        Deoptimization::DeoptReason reason = is_aastore ?
          Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
        builtin_throw(reason);
      }
    } else {
      (*failure_control) = not_subtype_ctrl;
    }
  }

  region->init_req(_obj_path, control());
  phi   ->init_req(_obj_path, cast_obj);

  // A merge of null or Casted-NotNull obj
  Node* res = _gvn.transform(phi);

  // Note I do NOT always 'replace_in_map(obj,result)' here.
  //   if( tk->klass()->can_be_primary_super() )
  // This means that if I successfully store an Object into an array-of-String
  // I 'forget' that the Object is really now known to be a String. I have to
  // do this because we don't have true union types for interfaces - if I store
  // a Baz into an array-of-Interface and then tell the optimizer it's an
  // Interface, I forget that it's also a Baz and cannot do Baz-like field
  // references to it. FIX THIS WHEN UNION TYPES APPEAR!
  //   replace_in_map( obj, res );

  // Return final merged results
  set_control( _gvn.transform(region) );
  record_for_igvn(region);

  return record_profiled_receiver_for_speculation(res);
}

3417 //------------------------------next_monitor-----------------------------------
3418 // What number should be given to the next monitor?
3419 int GraphKit::next_monitor() {
3420 int current = jvms()->monitor_depth() * C->sync_stack_slots();
3421 int next = current + C->sync_stack_slots();
3422 // Keep the toplevel high water mark current:
3423 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3424 return current;
3425 }
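// A minimal usage sketch (illustrative, assuming C->sync_stack_slots() == 1;
// 'flock' is the FastLockNode produced by shared_lock() below):
//
//   int m0 = next_monitor();    // monitor_depth == 0: returns slot 0, fixed_slots >= 1
//   map()->push_monitor(flock); // depth becomes 1
//   int m1 = next_monitor();    // monitor_depth == 1: returns slot 1, fixed_slots >= 2
//
// The return value numbers the current lock; fixed_slots only records the
// high-water mark across all paths.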
3426
3427 //------------------------------insert_mem_bar---------------------------------
3428 // Memory barrier to avoid floating things around
3429 // The membar serves as a pinch point between both control and all memory slices.
3430 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3431 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3432 mb->init_req(TypeFunc::Control, control());
3433 mb->init_req(TypeFunc::Memory, reset_memory());
3434 Node* membar = _gvn.transform(mb);
3462 }
3463 Node* membar = _gvn.transform(mb);
3464 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3465 if (alias_idx == Compile::AliasIdxBot) {
3466 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3467 } else {
3468 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3469 }
3470 return membar;
3471 }
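// Usage sketch (illustrative): shared_unlock() below emits
//
//   insert_mem_bar(Op_MemBarReleaseLock);
//
// to pin all memory slices at the unlock, while the alias_idx variant ending
// above fences a single memory slice (or the base memory when alias_idx is
// Compile::AliasIdxBot).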
3472
3473 //------------------------------shared_lock------------------------------------
3474 // Emit locking code.
3475 FastLockNode* GraphKit::shared_lock(Node* obj) {
3476 // bci is either a monitorenter bc or InvocationEntryBci
3477 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3478 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3479
3480 if( !GenerateSynchronizationCode )
3481 return nullptr; // Not locking things?
3482 if (stopped()) // Dead monitor?
3483 return nullptr;
3484
3485 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3486
3487 // Box the stack location
3488 Node* box = new BoxLockNode(next_monitor());
3489 // Check for bailout after new BoxLockNode
3490 if (failing()) { return nullptr; }
3491 box = _gvn.transform(box);
3492 Node* mem = reset_memory();
3493
3494 FastLockNode * flock = _gvn.transform(new FastLockNode(nullptr, obj, box) )->as_FastLock();
3495
3496 // Add monitor to debug info for the slow path. If we block inside the
3497 // slow path and de-opt, we need the monitor hanging around
3498 map()->push_monitor( flock );
3499
3500 const TypeFunc *tf = LockNode::lock_type();
3501 LockNode *lock = new LockNode(C, tf);
3530 }
3531 #endif
3532
3533 return flock;
3534 }
3535
3536
3537 //------------------------------shared_unlock----------------------------------
3538 // Emit unlocking code.
3539 void GraphKit::shared_unlock(Node* box, Node* obj) {
3540 // bci is either a monitorenter bc or InvocationEntryBci
3541 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3542 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3543
3544 if( !GenerateSynchronizationCode )
3545 return;
3546 if (stopped()) { // Dead monitor?
3547 map()->pop_monitor(); // Kill monitor from debug info
3548 return;
3549 }
3550
3551 // Memory barrier to avoid floating things down past the locked region
3552 insert_mem_bar(Op_MemBarReleaseLock);
3553
3554 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3555 UnlockNode *unlock = new UnlockNode(C, tf);
3556 #ifdef ASSERT
3557 unlock->set_dbg_jvms(sync_jvms());
3558 #endif
3559 uint raw_idx = Compile::AliasIdxRaw;
3560 unlock->init_req( TypeFunc::Control, control() );
3561 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3562 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3563 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3564 unlock->init_req( TypeFunc::ReturnAdr, top() );
3565
3566 unlock->init_req(TypeFunc::Parms + 0, obj);
3567 unlock->init_req(TypeFunc::Parms + 1, box);
3568 unlock = _gvn.transform(unlock)->as_Unlock();
3569
3570 Node* mem = reset_memory();
3571
3572 // unlock has no side-effects, sets few values
3573 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3574
3575 // Kill monitor from debug info
3576 map()->pop_monitor( );
3577 }
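// A minimal pairing sketch (illustrative; this mirrors what the parser does
// for monitorenter/monitorexit but is not copied from it):
//
//   FastLockNode* flock = shared_lock(obj);    // pushes a monitor
//   ... emit the body of the synchronized region ...
//   shared_unlock(flock->box_node(), obj);     // pops the monitor again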
3578
3579 //-------------------------------get_layout_helper-----------------------------
3580 // If the given klass is a constant or known to be an array,
3581 // fetch the constant layout helper value into constant_value
3582 // and return null. Otherwise, load the non-constant
3583 // layout helper value, and return the node which represents it.
3584 // This two-faced routine is useful because allocation sites
3585 // almost always feature constant types.
3586 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3587 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3588 if (!StressReflectiveCode && klass_t != nullptr) {
3589 bool xklass = klass_t->klass_is_exact();
3590 if (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM)) {
3591 jint lhelper;
3592 if (klass_t->isa_aryklassptr()) {
3593 BasicType elem = klass_t->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
3594 if (is_reference_type(elem, true)) {
3595 elem = T_OBJECT;
3596 }
3597 lhelper = Klass::array_layout_helper(elem);
3598 } else {
3599 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
3600 }
3601 if (lhelper != Klass::_lh_neutral_value) {
3602 constant_value = lhelper;
3603 return (Node*) nullptr;
3604 }
3605 }
3606 }
3607 constant_value = Klass::_lh_neutral_value; // put in a known value
3608 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3609 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3610 }
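// A sketch of the intended calling pattern (this is exactly how new_instance()
// and new_array() below consume the two-faced result):
//
//   jint  layout_con = Klass::_lh_neutral_value;
//   Node* layout_val = get_layout_helper(klass_node, layout_con);
//   if (layout_val == nullptr) {
//     // layout_con holds the constant helper; decisions fold at compile time
//   } else {
//     // layout_val is a runtime load of the klass's layout_helper field
//   }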
3611
3612 // We just put in an allocate/initialize with a big raw-memory effect.
3613 // Hook selected additional alias categories on the initialization.
3614 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3615 MergeMemNode* init_in_merge,
3616 Node* init_out_raw) {
3617 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3618 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3619
3620 Node* prevmem = kit.memory(alias_idx);
3621 init_in_merge->set_memory_at(alias_idx, prevmem);
3622 kit.set_memory(init_out_raw, alias_idx);
3623 }
3624
3625 //---------------------------set_output_for_allocation-------------------------
3626 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3627 const TypeOopPtr* oop_type,
3628 bool deoptimize_on_exception) {
3629 int rawidx = Compile::AliasIdxRaw;
3630 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3631 add_safepoint_edges(alloc);
3632 Node* allocx = _gvn.transform(alloc);
3633 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3634 // create memory projection for i_o
3635 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3636 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3637
3638 // create a memory projection as for the normal control path
3639 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3640 set_memory(malloc, rawidx);
3641
3642 // a normal slow-call doesn't change i_o, but an allocation does
3643 // we create a separate i_o projection for the normal control path
3644 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3645 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3646
3647 // put in an initialization barrier
3648 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3649 rawoop)->as_Initialize();
3650 assert(alloc->initialization() == init, "2-way macro link must work");
3651 assert(init ->allocation() == alloc, "2-way macro link must work");
3652 {
3653 // Extract memory strands which may participate in the new object's
3654 // initialization, and source them from the new InitializeNode.
3655 // This will allow us to observe initializations when they occur,
3656 // and link them properly (as a group) to the InitializeNode.
3657 assert(init->in(InitializeNode::Memory) == malloc, "");
3658 MergeMemNode* minit_in = MergeMemNode::make(malloc);
3659 init->set_req(InitializeNode::Memory, minit_in);
3660 record_for_igvn(minit_in); // fold it up later, if possible
3661 Node* minit_out = memory(rawidx);
3662 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3663 // Add an edge in the MergeMem for the header fields so an access
3664 // to one of those has correct memory state
3665 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3666 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3667 if (oop_type->isa_aryptr()) {
3668 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3669 int elemidx = C->get_alias_index(telemref);
3670 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3671 } else if (oop_type->isa_instptr()) {
3672 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
3673 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3674 ciField* field = ik->nonstatic_field_at(i);
3675 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
3676 continue; // do not bother to track really large numbers of fields
3677 // Find (or create) the alias category for this field:
3678 int fieldidx = C->alias_type(field)->index();
3679 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3680 }
3681 }
3682 }
3683
3684 // Cast raw oop to the real thing...
3685 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3686 javaoop = _gvn.transform(javaoop);
3687 C->set_recent_alloc(control(), javaoop);
3688 assert(just_allocated_object(control()) == javaoop, "just allocated");
3689
3690 #ifdef ASSERT
3691 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
3702 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3703 }
3704 }
3705 #endif //ASSERT
3706
3707 return javaoop;
3708 }
3709
3710 //---------------------------new_instance--------------------------------------
3711 // This routine takes a klass_node which may be constant (for a static type)
3712 // or may be non-constant (for reflective code). It will work equally well
3713 // for either, and the graph will fold nicely if the optimizer later reduces
3714 // the type to a constant.
3715 // The optional arguments are for specialized use by intrinsics:
3716 // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3717 // - If 'return_size_val', report the total object size to the caller.
3718 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3719 Node* GraphKit::new_instance(Node* klass_node,
3720 Node* extra_slow_test,
3721 Node* *return_size_val,
3722 bool deoptimize_on_exception) {
3723 // Compute size in doublewords
3724 // The size is always an integral number of doublewords, represented
3725 // as a positive bytewise size stored in the klass's layout_helper.
3726 // The layout_helper also encodes (in a low bit) the need for a slow path.
3727 jint layout_con = Klass::_lh_neutral_value;
3728 Node* layout_val = get_layout_helper(klass_node, layout_con);
3729 int layout_is_con = (layout_val == nullptr);
3730
3731 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
3732 // Generate the initial go-slow test. It's either ALWAYS (return a
3733 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
3734 // case) a computed value derived from the layout_helper.
3735 Node* initial_slow_test = nullptr;
3736 if (layout_is_con) {
3737 assert(!StressReflectiveCode, "stress mode does not use these paths");
3738 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3739 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3740 } else { // reflective case
3741 // This reflective path is used by Unsafe.allocateInstance.
3742 // (It may be stress-tested by specifying StressReflectiveCode.)
3743 // Basically, we want to get into the VM if there's an illegal argument.
3744 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3745 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3746 if (extra_slow_test != intcon(0)) {
3747 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3748 }
3749 // (Macro-expander will further convert this to a Bool, if necessary.)
3760
3761 // Clear the low bits to extract layout_helper_size_in_bytes:
3762 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3763 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3764 size = _gvn.transform( new AndXNode(size, mask) );
3765 }
3766 if (return_size_val != nullptr) {
3767 (*return_size_val) = size;
3768 }
3769
3770 // This is a precise notnull oop of the klass.
3771 // (Actually, it need not be precise if this is a reflective allocation.)
3772 // It's what we cast the result to.
3773 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3774 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
3775 const TypeOopPtr* oop_type = tklass->as_instance_type();
3776
3777 // Now generate allocation code
3778
3779 // The entire memory state is needed for slow path of the allocation
3780 // since GC and deoptimization can happen.
3781 Node *mem = reset_memory();
3782 set_all_memory(mem); // Create new memory state
3783
3784 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3785 control(), mem, i_o(),
3786 size, klass_node,
3787 initial_slow_test);
3788
3789 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3790 }
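// Usage sketch (illustrative, assuming a constant instance klass 'ik'):
//
//   Node* klass_node = makecon(TypeKlassPtr::make(ik));
//   Node* size       = nullptr;
//   Node* obj        = new_instance(klass_node, nullptr, &size);
//
// With a constant klass the layout helper is constant, so the slow-path test
// folds unless the klass itself requires it (e.g. it has a finalizer).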
3791
3792 //-------------------------------new_array-------------------------------------
3793 // helper for both newarray and anewarray
3794 // The 'length' parameter is (obviously) the length of the array.
3795 // The optional arguments are for specialized use by intrinsics:
3796 // - If 'return_size_val', report the non-padded array size (sum of header size
3797 // and array body) to the caller.
3798 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3799 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3800 Node* length, // number of array elements
3801 int nargs, // number of arguments to push back for uncommon trap
3802 Node* *return_size_val,
3803 bool deoptimize_on_exception) {
3804 jint layout_con = Klass::_lh_neutral_value;
3805 Node* layout_val = get_layout_helper(klass_node, layout_con);
3806 int layout_is_con = (layout_val == nullptr);
3807
3808 if (!layout_is_con && !StressReflectiveCode &&
3809 !too_many_traps(Deoptimization::Reason_class_check)) {
3810 // This is a reflective array creation site.
3811 // Optimistically assume that it is a subtype of Object[],
3812 // so that we can fold up all the address arithmetic.
3813 layout_con = Klass::array_layout_helper(T_OBJECT);
3814 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3815 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3816 { BuildCutout unless(this, bol_lh, PROB_MAX);
3817 inc_sp(nargs);
3818 uncommon_trap(Deoptimization::Reason_class_check,
3819 Deoptimization::Action_maybe_recompile);
3820 }
3821 layout_val = nullptr;
3822 layout_is_con = true;
3823 }
3824
3825 // Generate the initial go-slow test. Make sure we do not overflow
3826 // if length is huge (near 2Gig) or negative! We do not need
3827 // exact double-words here, just a close approximation of needed
3828 // double-words. We can't add any offset or rounding bits, lest we
3829 // take a size -1 of bytes and make it positive. Use an unsigned
3830 // compare, so negative sizes look hugely positive.
3831 int fast_size_limit = FastAllocateSizeLimit;
3832 if (layout_is_con) {
3833 assert(!StressReflectiveCode, "stress mode does not use these paths");
3834 // Increase the size limit if we have exact knowledge of array type.
3835 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3836 fast_size_limit <<= (LogBytesPerLong - log2_esize);
3837 }
3838
3839 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3840 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3841
3842 // --- Size Computation ---
3843 // array_size = round_to_heap(array_header + (length << elem_shift));
3844 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
3845 // and align_to(x, y) == ((x + y-1) & ~(y-1))
3846 // The rounding mask is strength-reduced, if possible.
3847 int round_mask = MinObjAlignmentInBytes - 1;
3848 Node* header_size = nullptr;
3849 // (T_BYTE has the weakest alignment and size restrictions...)
3850 if (layout_is_con) {
3851 int hsize = Klass::layout_helper_header_size(layout_con);
3852 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3853 if ((round_mask & ~right_n_bits(eshift)) == 0)
3854 round_mask = 0; // strength-reduce it if it goes away completely
3855 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3856 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3857 assert(header_size_min <= hsize, "generic minimum is smallest");
3858 header_size = intcon(hsize);
3859 } else {
3860 Node* hss = intcon(Klass::_lh_header_size_shift);
3861 Node* hsm = intcon(Klass::_lh_header_size_mask);
3862 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
3863 header_size = _gvn.transform(new AndINode(header_size, hsm));
3864 }
3865
3866 Node* elem_shift = nullptr;
3867 if (layout_is_con) {
3868 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3869 if (eshift != 0)
3870 elem_shift = intcon(eshift);
3871 } else {
3872 // There is no need to mask or shift this value.
3873 // The semantics of LShiftINode include an implicit mask to 0x1F.
3874 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3875 elem_shift = layout_val;
3922 }
3923 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
3924
3925 if (return_size_val != nullptr) {
3926 // This is the size
3927 (*return_size_val) = non_rounded_size;
3928 }
3929
3930 Node* size = non_rounded_size;
3931 if (round_mask != 0) {
3932 Node* mask1 = MakeConX(round_mask);
3933 size = _gvn.transform(new AddXNode(size, mask1));
3934 Node* mask2 = MakeConX(~round_mask);
3935 size = _gvn.transform(new AndXNode(size, mask2));
3936 }
3937 // else if round_mask == 0, the size computation is self-rounding
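// Worked example (illustrative numbers): for a byte array of length 5 with a
// 16-byte header, non_rounded_size = 16 + 5 = 21; with round_mask = 7 the
// final size is (21 + 7) & ~7 = 24, i.e. aligned to an 8-byte
// MinObjAlignmentInBytes.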
3938
3939 // Now generate allocation code
3940
3941 // The entire memory state is needed for slow path of the allocation
3942 // since GC and deoptimization can happen.
3943 Node *mem = reset_memory();
3944 set_all_memory(mem); // Create new memory state
3945
3946 if (initial_slow_test->is_Bool()) {
3947 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3948 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3949 }
3950
3951 const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3952 Node* valid_length_test = _gvn.intcon(1);
3953 if (ary_type->isa_aryptr()) {
3954 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
3955 jint max = TypeAryPtr::max_array_length(bt);
3956 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
3957 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
3958 }
3959
3960 // Create the AllocateArrayNode and its result projections
3961 AllocateArrayNode* alloc
3962 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3963 control(), mem, i_o(),
3964 size, klass_node,
3965 initial_slow_test,
3966 length, valid_length_test);
3967
3968 // Cast to correct type. Note that the klass_node may be constant or not,
3969 // and in the latter case the actual array type will be inexact also.
3970 // (This happens via a non-constant argument to inline_native_newArray.)
3971 // In any case, the value of klass_node provides the desired array type.
3972 const TypeInt* length_type = _gvn.find_int_type(length);
3973 if (ary_type->isa_aryptr() && length_type != nullptr) {
3974 // Try to get a better type than POS for the size
3975 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3976 }
3977
3978 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
3979
3980 array_ideal_length(alloc, ary_type, true);
3981 return javaoop;
3982 }
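// Usage sketch (illustrative): a bytecode-level caller boils down to
// something like
//
//   Node* obj = new_array(makecon(TypeKlassPtr::make(array_klass)),
//                         length, 1 /* repush length if we trap */);
//
// where nargs tells the uncommon-trap paths how many stack slots to restore
// for the interpreter.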
3983
3984 // The following "Ideal_foo" functions are placed here because they recognize
3985 // the graph shapes created by the functions immediately above.
3986
3987 //---------------------------Ideal_allocation----------------------------------
4095 set_all_memory(ideal.merged_memory());
4096 set_i_o(ideal.i_o());
4097 set_control(ideal.ctrl());
4098 }
4099
4100 void GraphKit::final_sync(IdealKit& ideal) {
4101 // Final sync IdealKit and graphKit.
4102 sync_kit(ideal);
4103 }
4104
4105 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4106 Node* len = load_array_length(load_String_value(str, set_ctrl));
4107 Node* coder = load_String_coder(str, set_ctrl);
4108 // Divide length by 2 if coder is UTF16
4109 return _gvn.transform(new RShiftINode(len, coder));
4110 }
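// Worked example (illustrative): with CompactStrings, coder is 0 (LATIN1) or
// 1 (UTF16) and value is a byte[], so for "abc":
//
//   LATIN1: value.length == 3, 3 >> 0 == 3 chars
//   UTF16 : value.length == 6, 6 >> 1 == 3 chars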
4111
4112 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4113 int value_offset = java_lang_String::value_offset();
4114 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4115 false, nullptr, 0);
4116 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4117 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4118 TypeAry::make(TypeInt::BYTE, TypeInt::POS),
4119 ciTypeArrayKlass::make(T_BYTE), true, 0);
4120 Node* p = basic_plus_adr(str, str, value_offset);
4121 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4122 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4123 return load;
4124 }
4125
4126 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4127 if (!CompactStrings) {
4128 return intcon(java_lang_String::CODER_UTF16);
4129 }
4130 int coder_offset = java_lang_String::coder_offset();
4131 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4132 false, nullptr, 0);
4133 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4134
4135 Node* p = basic_plus_adr(str, str, coder_offset);
4136 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4137 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4138 return load;
4139 }
4140
4141 void GraphKit::store_String_value(Node* str, Node* value) {
4142 int value_offset = java_lang_String::value_offset();
4143 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4144 false, nullptr, 0);
4145 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4146
4147 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4148 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4149 }
4150
4151 void GraphKit::store_String_coder(Node* str, Node* value) {
4152 int coder_offset = java_lang_String::coder_offset();
4153 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4154 false, nullptr, 0);
4155 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4156
4157 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4158 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4159 }
4160
4161 // Capture src and dst memory state with a MergeMemNode
4162 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4163 if (src_type == dst_type) {
4164 // Types are equal, we don't need a MergeMemNode
4165 return memory(src_type);
4166 }
4167 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4168 record_for_igvn(merge); // fold it up later, if possible
4169 int src_idx = C->get_alias_index(src_type);
4170 int dst_idx = C->get_alias_index(dst_type);
4171 merge->set_memory_at(src_idx, memory(src_idx));
4172 merge->set_memory_at(dst_idx, memory(dst_idx));
4173 return merge;
4174 }
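// Usage sketch (illustrative, not from this file): a copy between byte[] and
// char[] slices could capture both sides before emitting a node that takes a
// single memory edge:
//
//   Node* mem = capture_memory(TypeAryPtr::BYTES, TypeAryPtr::CHARS);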
4247 i_char->init_req(2, AddI(i_char, intcon(2)));
4248
4249 set_control(IfFalse(iff));
4250 set_memory(st, TypeAryPtr::BYTES);
4251 }
4252
4253 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4254 if (!field->is_constant()) {
4255 return nullptr; // Field not marked as constant.
4256 }
4257 ciInstance* holder = nullptr;
4258 if (!field->is_static()) {
4259 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4260 if (const_oop != nullptr && const_oop->is_instance()) {
4261 holder = const_oop->as_instance();
4262 }
4263 }
4264 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4265 /*is_unsigned_load=*/false);
4266 if (con_type != nullptr) {
4267 return makecon(con_type);
4268 }
4269 return nullptr;
4270 }
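// Usage sketch (illustrative): folding a load from a static final field,
// where no holder object is needed (the obj argument is only consulted for
// non-static fields):
//
//   Node* con = make_constant_from_field(field, nullptr);
//   if (con != nullptr) {
//     // use 'con' instead of emitting a load
//   }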
4271
4272 Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
4273 const TypeOopPtr* obj_type = obj->bottom_type()->isa_oopptr();
4274 const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
4275 if (obj_type != nullptr && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
4276 const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
4277 Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
4278 return casted_obj;
4279 }
4280 return obj;
4281 }
25 #include "ci/ciFlatArrayKlass.hpp"
26 #include "ci/ciInlineKlass.hpp"
27 #include "ci/ciUtilities.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "ci/ciObjArray.hpp"
30 #include "asm/register.hpp"
31 #include "compiler/compileLog.hpp"
32 #include "gc/shared/barrierSet.hpp"
33 #include "gc/shared/c2/barrierSetC2.hpp"
34 #include "interpreter/interpreter.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/castnode.hpp"
38 #include "opto/convertnode.hpp"
39 #include "opto/graphKit.hpp"
40 #include "opto/idealKit.hpp"
41 #include "opto/inlinetypenode.hpp"
42 #include "opto/intrinsicnode.hpp"
43 #include "opto/locknode.hpp"
44 #include "opto/machnode.hpp"
45 #include "opto/narrowptrnode.hpp"
46 #include "opto/opaquenode.hpp"
47 #include "opto/parse.hpp"
48 #include "opto/rootnode.hpp"
49 #include "opto/runtime.hpp"
50 #include "opto/subtypenode.hpp"
51 #include "runtime/deoptimization.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "utilities/bitMap.inline.hpp"
54 #include "utilities/powerOfTwo.hpp"
55 #include "utilities/growableArray.hpp"
56
57 //----------------------------GraphKit-----------------------------------------
58 // Main utility constructor.
59 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
60 : Phase(Phase::Parser),
61 _env(C->env()),
62 _gvn((gvn != nullptr) ? *gvn : *C->initial_gvn()),
63 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
64 {
65 assert(gvn == nullptr || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
66 _exceptions = jvms->map()->next_exception();
67 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
68 set_jvms(jvms);
69 #ifdef ASSERT
70 if (_gvn.is_IterGVN() != nullptr) {
71 assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
72 // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
73 _worklist_size = _gvn.C->igvn_worklist()->size();
74 }
75 #endif
76 }
77
78 // Private constructor for parser.
79 GraphKit::GraphKit()
80 : Phase(Phase::Parser),
81 _env(C->env()),
82 _gvn(*C->initial_gvn()),
83 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
84 {
85 _exceptions = nullptr;
86 set_map(nullptr);
87 debug_only(_sp = -99);
88 debug_only(set_bci(-99));
89 }
90
91
92
93 //---------------------------clean_stack---------------------------------------
94 // Clear away rubbish from the stack area of the JVM state.
95 // This destroys any arguments that may be waiting on the stack.
853 if (PrintMiscellaneous && (Verbose || WizardMode)) {
854 tty->print_cr("Zombie local %d: ", local);
855 jvms->dump();
856 }
857 return false;
858 }
859 }
860 }
861 return true;
862 }
863
864 #endif //ASSERT
865
866 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
867 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
868 ciMethod* cur_method = jvms->method();
869 int cur_bci = jvms->bci();
870 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
871 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
872 return Interpreter::bytecode_should_reexecute(code) ||
873 (is_anewarray && (code == Bytecodes::_multianewarray));
874 // Reexecute _multianewarray bytecode which was replaced with
875 // sequence of [a]newarray. See Parse::do_multianewarray().
876 //
 877 // Note: the interpreter should not have it set since this optimization
 878 // is limited by dimensions and guarded by a flag, so in some cases
 879 // multianewarray() runtime calls will be generated and
 880 // the bytecode should not be re-executed (stack will not be reset).
881 } else {
882 return false;
883 }
884 }
885
886 // Helper function for adding JVMState and debug information to node
887 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
888 // Add the safepoint edges to the call (or other safepoint).
889
890 // Make sure dead locals are set to top. This
891 // should help register allocation time and cut down on the size
892 // of the deoptimization information.
893 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
944 }
945
946 // Presize the call:
947 DEBUG_ONLY(uint non_debug_edges = call->req());
948 call->add_req_batch(top(), youngest_jvms->debug_depth());
949 assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
950
951 // Set up edges so that the call looks like this:
952 // Call [state:] ctl io mem fptr retadr
953 // [parms:] parm0 ... parmN
954 // [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
955 // [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
956 // [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
957 // Note that caller debug info precedes callee debug info.
958
959 // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
960 uint debug_ptr = call->req();
961
962 // Loop over the map input edges associated with jvms, add them
963 // to the call node, & reset all offsets to match call node array.
964
965 JVMState* callee_jvms = nullptr;
966 for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
967 uint debug_end = debug_ptr;
968 uint debug_start = debug_ptr - in_jvms->debug_size();
969 debug_ptr = debug_start; // back up the ptr
970
971 uint p = debug_start; // walks forward in [debug_start, debug_end)
972 uint j, k, l;
973 SafePointNode* in_map = in_jvms->map();
974 out_jvms->set_map(call);
975
976 if (can_prune_locals) {
977 assert(in_jvms->method() == out_jvms->method(), "sanity");
978 // If the current throw can reach an exception handler in this JVMS,
979 // then we must keep everything live that can reach that handler.
980 // As a quick and dirty approximation, we look for any handlers at all.
981 if (in_jvms->method()->has_exception_handlers()) {
982 can_prune_locals = false;
983 }
984 }
985
986 // Add the Locals
987 k = in_jvms->locoff();
988 l = in_jvms->loc_size();
989 out_jvms->set_locoff(p);
990 if (!can_prune_locals) {
991 for (j = 0; j < l; j++) {
992 Node* val = in_map->in(k + j);
993 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
994 if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
995 callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
996 val->bottom_type()->is_inlinetypeptr()) {
997 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
998 }
999 call->set_req(p++, val);
1000 }
1001 } else {
1002 p += l; // already set to top above by add_req_batch
1003 }
1004
1005 // Add the Expression Stack
1006 k = in_jvms->stkoff();
1007 l = in_jvms->sp();
1008 out_jvms->set_stkoff(p);
1009 if (!can_prune_locals) {
1010 for (j = 0; j < l; j++) {
1011 Node* val = in_map->in(k + j);
1012 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
1013 if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
1014 callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
1015 val->bottom_type()->is_inlinetypeptr()) {
1016 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
1017 }
1018 call->set_req(p++, val);
1019 }
1020 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
1021 // Divide stack into {S0,...,S1}, where S0 is set to top.
1022 uint s1 = stack_slots_not_pruned;
1023 stack_slots_not_pruned = 0; // for next iteration
1024 if (s1 > l) s1 = l;
1025 uint s0 = l - s1;
1026 p += s0; // skip the tops preinstalled by add_req_batch
1027 for (j = s0; j < l; j++)
1028 call->set_req(p++, in_map->in(k+j));
1029 } else {
1030 p += l; // already set to top above by add_req_batch
1031 }
1032
1033 // Add the Monitors
1034 k = in_jvms->monoff();
1035 l = in_jvms->mon_size();
1036 out_jvms->set_monoff(p);
1037 for (j = 0; j < l; j++)
1038 call->set_req(p++, in_map->in(k+j));
1039
1040 // Copy any scalar object fields.
1041 k = in_jvms->scloff();
1042 l = in_jvms->scl_size();
1043 out_jvms->set_scloff(p);
1044 for (j = 0; j < l; j++)
1045 call->set_req(p++, in_map->in(k+j));
1046
1047 // Finish the new jvms.
1048 out_jvms->set_endoff(p);
1049
1050 assert(out_jvms->endoff() == debug_end, "fill ptr must match");
1051 assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
1052 assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
1053 assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
1054 assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
1055 assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
1056
1057 // Update the two tail pointers in parallel.
1058 callee_jvms = out_jvms;
1059 out_jvms = out_jvms->caller();
1060 in_jvms = in_jvms->caller();
1061 }
1062
1063 assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1064
1065 // Test the correctness of JVMState::debug_xxx accessors:
1066 assert(call->jvms()->debug_start() == non_debug_edges, "");
1067 assert(call->jvms()->debug_end() == call->req(), "");
1068 assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1069 }
1070
1071 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
1072 Bytecodes::Code code = java_bc();
1073 if (code == Bytecodes::_wide) {
1074 code = method()->java_code_at_bci(bci() + 1);
1075 }
1076
1077 if (code != Bytecodes::_illegal) {
1078 depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1214 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1215 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1216 return _gvn.transform( new AndLNode(conv, mask) );
1217 }
1218
1219 Node* GraphKit::ConvL2I(Node* offset) {
1220 // short-circuit a common case
1221 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1222 if (offset_con != (jlong)Type::OffsetBot) {
1223 return intcon((int) offset_con);
1224 }
1225 return _gvn.transform( new ConvL2INode(offset));
1226 }
1227
1228 //-------------------------load_object_klass-----------------------------------
1229 Node* GraphKit::load_object_klass(Node* obj) {
1230 // Special-case a fresh allocation to avoid building nodes:
1231 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1232 if (akls != nullptr) return akls;
1233 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1234 return _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
1235 }
1236
1237 //-------------------------load_array_length-----------------------------------
1238 Node* GraphKit::load_array_length(Node* array) {
1239 // Special-case a fresh allocation to avoid building nodes:
1240 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1241 Node *alen;
1242 if (alloc == nullptr) {
1243 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1244 alen = _gvn.transform( new LoadRangeNode(nullptr, immutable_memory(), r_adr, TypeInt::POS));
1245 } else {
1246 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1247 }
1248 return alen;
1249 }
1250
1251 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1252 const TypeOopPtr* oop_type,
1253 bool replace_length_in_map) {
1254 Node* length = alloc->Ideal_length();
1263 replace_in_map(length, ccast);
1264 }
1265 return ccast;
1266 }
1267 }
1268 return length;
1269 }
1270
1271 //------------------------------do_null_check----------------------------------
1272 // Helper function to do a null pointer check. Returned value is
1273 // the incoming address with null casted away. You are allowed to use the
1274 // not-null value only if you are control dependent on the test.
1275 #ifndef PRODUCT
1276 extern uint explicit_null_checks_inserted,
1277 explicit_null_checks_elided;
1278 #endif
1279 Node* GraphKit::null_check_common(Node* value, BasicType type,
1280 // optional arguments for variations:
1281 bool assert_null,
1282 Node* *null_control,
1283 bool speculative,
1284 bool is_init_check) {
1285 assert(!assert_null || null_control == nullptr, "not both at once");
1286 if (stopped()) return top();
1287 NOT_PRODUCT(explicit_null_checks_inserted++);
1288
1289 if (value->is_InlineType()) {
1290 // Null checking a scalarized but nullable inline type. Check the IsInit
1291 // input instead of the oop input to avoid keeping buffer allocations alive.
1292 InlineTypeNode* vtptr = value->as_InlineType();
1293 while (vtptr->get_oop()->is_InlineType()) {
1294 vtptr = vtptr->get_oop()->as_InlineType();
1295 }
1296 null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1297 if (stopped()) {
1298 return top();
1299 }
1300 if (assert_null) {
1301 // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1302 // vtptr = InlineTypeNode::make_null(_gvn, vtptr->type()->inline_klass());
1303 // replace_in_map(value, vtptr);
1304 // return vtptr;
1305 replace_in_map(value, null());
1306 return null();
1307 }
1308 bool do_replace_in_map = (null_control == nullptr || (*null_control) == top());
1309 return cast_not_null(value, do_replace_in_map);
1310 }
1311
1312 // Construct null check
1313 Node *chk = nullptr;
1314 switch(type) {
1315 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1316 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1317 case T_ARRAY : // fall through
1318 type = T_OBJECT; // simplify further tests
1319 case T_OBJECT : {
1320 const Type *t = _gvn.type( value );
1321
1322 const TypeOopPtr* tp = t->isa_oopptr();
1323 if (tp != nullptr && !tp->is_loaded()
1324 // Only for do_null_check, not any of its siblings:
1325 && !assert_null && null_control == nullptr) {
1326 // Usually, any field access or invocation on an unloaded oop type
1327 // will simply fail to link, since the statically linked class is
1328 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1329 // the static class is loaded but the sharper oop type is not.
1330 // Rather than checking for this obscure case in lots of places,
1331 // we simply observe that a null check on an unloaded class
1395 }
1396 Node *oldcontrol = control();
1397 set_control(cfg);
1398 Node *res = cast_not_null(value);
1399 set_control(oldcontrol);
1400 NOT_PRODUCT(explicit_null_checks_elided++);
1401 return res;
1402 }
1403 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1404 if (cfg == nullptr) break; // Quit at region nodes
1405 depth++;
1406 }
1407 }
1408
1409 //-----------
1410 // Branch to failure if null
1411 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1412 Deoptimization::DeoptReason reason;
1413 if (assert_null) {
1414 reason = Deoptimization::reason_null_assert(speculative);
1415 } else if (type == T_OBJECT || is_init_check) {
1416 reason = Deoptimization::reason_null_check(speculative);
1417 } else {
1418 reason = Deoptimization::Reason_div0_check;
1419 }
1420 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1421 // ciMethodData::has_trap_at will return a conservative -1 if any
1422 // must-be-null assertion has failed. This could cause performance
1423 // problems for a method after its first do_null_assert failure.
1424 // Consider using 'Reason_class_check' instead?
1425
1426 // To cause an implicit null check, we set the not-null probability
1427 // to the maximum (PROB_MAX). For an explicit check the probability
1428 // is set to a smaller value.
1429 if (null_control != nullptr || too_many_traps(reason)) {
1430 // probability is less likely
1431 ok_prob = PROB_LIKELY_MAG(3);
1432 } else if (!assert_null &&
1433 (ImplicitNullCheckThreshold > 0) &&
1434 method() != nullptr &&
1435 (method()->method_data()->trap_count(reason)
1469 }
1470
1471 if (assert_null) {
1472 // Cast obj to null on this path.
1473 replace_in_map(value, zerocon(type));
1474 return zerocon(type);
1475 }
1476
1477 // Cast obj to not-null on this path, if there is no null_control.
1478 // (If there is a null_control, a non-null value may come back to haunt us.)
1479 if (type == T_OBJECT) {
1480 Node* cast = cast_not_null(value, false);
1481 if (null_control == nullptr || (*null_control) == top())
1482 replace_in_map(value, cast);
1483 value = cast;
1484 }
1485
1486 return value;
1487 }
1488
1489 //------------------------------cast_not_null----------------------------------
1490 // Cast obj to not-null on this path
1491 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1492 if (obj->is_InlineType()) {
1493 Node* vt = obj->isa_InlineType()->clone_if_required(&gvn(), map(), do_replace_in_map);
1494 vt->as_InlineType()->set_is_init(_gvn);
1495 vt = _gvn.transform(vt);
1496 if (do_replace_in_map) {
1497 replace_in_map(obj, vt);
1498 }
1499 return vt;
1500 }
1501 const Type *t = _gvn.type(obj);
1502 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1503 // Object is already not-null?
1504 if( t == t_not_null ) return obj;
1505
1506 Node* cast = new CastPPNode(control(), obj, t_not_null);
1507 cast = _gvn.transform( cast );
1508
1509 // Scan for instances of 'obj' in the current JVM mapping.
1510 // These instances are known to be not-null after the test.
1511 if (do_replace_in_map)
1512 replace_in_map(obj, cast);
1513
1514 return cast; // Return casted value
1515 }
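// A minimal sketch of the usual pattern (mirrors the null_check_oop() usage
// seen earlier in this file):
//
//   Node* null_ctl = top();
//   Node* not_null_obj = null_check_oop(obj, &null_ctl);
//   // on the fall-through control, not_null_obj carries the NOTNULL-joined type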
1516
1517 // Sometimes in intrinsics, we implicitly know an object is not null
1518 // (there's no actual null check) so we can cast it to not null. In
1519 // the course of optimizations, the input to the cast can become null.
1520 // In that case that data path will die and we need the control path
1609 // These are layered on top of the factory methods in LoadNode and StoreNode,
1610 // and integrate with the parser's memory state and _gvn engine.
1611 //
1612
1613 // factory methods in "int adr_idx"
1614 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1615 MemNode::MemOrd mo,
1616 LoadNode::ControlDependency control_dependency,
1617 bool require_atomic_access,
1618 bool unaligned,
1619 bool mismatched,
1620 bool unsafe,
1621 uint8_t barrier_data) {
1622 int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
1623 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1624 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1625 debug_only(adr_type = C->get_adr_type(adr_idx));
1626 Node* mem = memory(adr_idx);
1627 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1628 ld = _gvn.transform(ld);
1629
1630 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1631 // Improve graph before escape analysis and boxing elimination.
1632 record_for_igvn(ld);
1633 if (ld->is_DecodeN()) {
1634 // Also record the actual load (LoadN) in case ld is DecodeN. In some
1635 // rare corner cases, ld->in(1) can be something other than LoadN (e.g.,
1636 // a Phi). Recording such cases is still perfectly sound, but may be
1637 // unnecessary and result in some minor IGVN overhead.
1638 record_for_igvn(ld->in(1));
1639 }
1640 }
1641 return ld;
1642 }
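// Usage sketch (illustrative): an unordered int load, matching the
// layout-helper load in get_layout_helper():
//
//   Node* ld = make_load(nullptr, adr, TypeInt::INT, T_INT, MemNode::unordered);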
1643
1644 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1645 MemNode::MemOrd mo,
1646 bool require_atomic_access,
1647 bool unaligned,
1648 bool mismatched,
1649 bool unsafe,
1663 if (unsafe) {
1664 st->as_Store()->set_unsafe_access();
1665 }
1666 st->as_Store()->set_barrier_data(barrier_data);
1667 st = _gvn.transform(st);
1668 set_memory(st, adr_idx);
1669 // Back-to-back stores can only remove intermediate store with DU info
1670 // so push on worklist for optimizer.
1671 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1672 record_for_igvn(st);
1673
1674 return st;
1675 }
1676
1677 Node* GraphKit::access_store_at(Node* obj,
1678 Node* adr,
1679 const TypePtr* adr_type,
1680 Node* val,
1681 const Type* val_type,
1682 BasicType bt,
1683 DecoratorSet decorators,
1684 bool safe_for_replace,
1685 const InlineTypeNode* vt) {
1686 // Transformation of a value which could be null pointer (CastPP #null)
1687 // could be delayed during Parse (for example, in adjust_map_after_if()).
1688 // Execute transformation here to avoid barrier generation in such case.
1689 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1690 val = _gvn.makecon(TypePtr::NULL_PTR);
1691 }
1692
1693 if (stopped()) {
1694 return top(); // Dead path ?
1695 }
1696
1697 assert(val != nullptr, "not dead path");
1698 if (val->is_InlineType()) {
1699 // Store to non-flat field. Buffer the inline type and make sure
1700 // the store is re-executed if the allocation triggers deoptimization.
1701 PreserveReexecuteState preexecs(this);
1702 jvms()->set_should_reexecute(true);
1703 val = val->as_InlineType()->buffer(this, safe_for_replace);
1704 }
1705
1706 C2AccessValuePtr addr(adr, adr_type);
1707 C2AccessValue value(val, val_type);
1708 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr, nullptr, vt);
1709 if (access.is_raw()) {
1710 return _barrier_set->BarrierSetC2::store_at(access, value);
1711 } else {
1712 return _barrier_set->store_at(access, value);
1713 }
1714 }
1715
1716 Node* GraphKit::access_load_at(Node* obj, // containing obj
1717 Node* adr, // actual address to store val at
1718 const TypePtr* adr_type,
1719 const Type* val_type,
1720 BasicType bt,
1721 DecoratorSet decorators,
1722 Node* ctl) {
1723 if (stopped()) {
1724 return top(); // Dead path ?
1725 }
1726
1727 C2AccessValuePtr addr(adr, adr_type);
1728 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1729 if (access.is_raw()) {
1730 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1731 } else {
1732 return _barrier_set->load_at(access, val_type);
1733 }
1734 }
1735
1736 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1737 const Type* val_type,
1738 BasicType bt,
1739 DecoratorSet decorators) {
1740 if (stopped()) {
1741 return top(); // Dead path ?
1742 }
1743
1744 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1745 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1746 if (access.is_raw()) {
1747 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1748 } else {
1813 Node* new_val,
1814 const Type* value_type,
1815 BasicType bt,
1816 DecoratorSet decorators) {
1817 C2AccessValuePtr addr(adr, adr_type);
1818 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1819 if (access.is_raw()) {
1820 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1821 } else {
1822 return _barrier_set->atomic_add_at(access, new_val, value_type);
1823 }
1824 }
1825
1826 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1827 return _barrier_set->clone(this, src, dst, size, is_array);
1828 }
1829
1830 //-------------------------array_element_address-------------------------
1831 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1832 const TypeInt* sizetype, Node* ctrl) {
1833 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
1834 assert(!arytype->is_flat() || elembt == T_OBJECT, "element type of flat arrays is T_OBJECT");
1835 uint shift;
1836 if (arytype->is_flat() && arytype->klass_is_exact()) {
1837 // We can only determine the flat array layout statically if the klass is exact. Otherwise, we could have different
1838 // value classes at runtime with a potentially different layout. The caller needs to fall back to call
1839 // load/store_unknown_inline_type() at runtime. We could return a sentinel node for the non-exact case but that
1840 // might mess with other GVN transformations in between. Thus, we just continue in the else branch normally, even
1841 // though we don't need the address node in this case and throw it away again.
1842 shift = arytype->flat_log_elem_size();
1843 } else {
1844 shift = exact_log2(type2aelembytes(elembt));
1845 }
1846 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1847
1848 // short-circuit a common case (saves lots of confusing waste motion)
1849 jint idx_con = find_int_con(idx, -1);
1850 if (idx_con >= 0) {
1851 intptr_t offset = header + ((intptr_t)idx_con << shift);
1852 return basic_plus_adr(ary, offset);
1853 }
1854
1855 // must be correct type for alignment purposes
1856 Node* base = basic_plus_adr(ary, header);
1857 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1858 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1859 return basic_plus_adr(ary, base, scale);
1860 }
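// Worked example (illustrative, 64-bit with compressed klass pointers): for a
// T_INT array and constant index 3, header = 16 and shift = 2, so the
// short-circuit returns basic_plus_adr(ary, 16 + (3 << 2)) == ary + 28.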
1861
1862 //-------------------------load_array_element-------------------------
1863 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1864 const Type* elemtype = arytype->elem();
1865 BasicType elembt = elemtype->array_element_basic_type();
1866 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1867 if (elembt == T_NARROWOOP) {
1868 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1869 }
1870 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1871 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1872 return ld;
1873 }
1874
1875 //-------------------------set_arguments_for_java_call-------------------------
1876 // Arguments (pre-popped from the stack) are taken from the JVMS.
1877 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1878 PreserveReexecuteState preexecs(this);
1879 if (EnableValhalla) {
1880 // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
1881 // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
1882 jvms()->set_should_reexecute(true);
1883 int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1884 inc_sp(arg_size);
1885 }
1886 // Add the call arguments
1887 const TypeTuple* domain = call->tf()->domain_sig();
1888 uint nargs = domain->cnt();
1889 int arg_num = 0;
1890 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1891 Node* arg = argument(i-TypeFunc::Parms);
1892 const Type* t = domain->field_at(i);
1893 // TODO 8284443 A static call to a mismatched method should still be scalarized
1894 if (t->is_inlinetypeptr() && !call->method()->get_Method()->mismatch() && call->method()->is_scalarized_arg(arg_num)) {
1895 // We don't pass inline type arguments by reference but instead pass each field of the inline type
1896 if (!arg->is_InlineType()) {
1897 assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
1898 arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
1899 }
1900 InlineTypeNode* vt = arg->as_InlineType();
1901 vt->pass_fields(this, call, idx, true, !t->maybe_null());
1902 // If an inline type argument is passed as fields, attach the Method* to the call site
1903 // to be able to access the extended signature later via attached_method_before_pc().
1904 // For example, see CompiledMethod::preserve_callee_argument_oops().
1905 call->set_override_symbolic_info(true);
1906 // Register an evol dependency on the callee method to make sure that this method is deoptimized and
1907 // re-compiled with a non-scalarized calling convention if the callee method is later marked as mismatched.
1908 C->dependencies()->assert_evol_method(call->method());
1909 arg_num++;
1910 continue;
1911 } else if (arg->is_InlineType()) {
1912 // Pass inline type argument via oop to callee
1913 InlineTypeNode* inline_type = arg->as_InlineType();
1914 const ciMethod* method = call->method();
1915 ciInstanceKlass* holder = method->holder();
1916 const bool is_receiver = (i == TypeFunc::Parms);
1917 const bool is_abstract_or_object_klass_constructor = method->is_object_constructor() &&
1918 (holder->is_abstract() || holder->is_java_lang_Object());
1919 const bool is_larval_receiver_on_super_constructor = is_receiver && is_abstract_or_object_klass_constructor;
1920 bool must_init_buffer = true;
1921 // We always need to buffer inline types when they are escaping. However, we can skip the actual initialization
1922 // of the buffer if the inline type is a larval because we are going to update the buffer anyway which requires
1923 // us to create a new one. But there is one special case where we are still required to initialize the buffer:
1924 // When we have a larval receiver invoked on an abstract (value class) constructor or the Object constructor (that
1925 // is not going to be inlined). After this call, the larval is completely initialized and thus not a larval anymore.
1926 // We therefore need to force an initialization of the buffer to not lose all the field writes so far in case the
1927 // buffer needs to be used (e.g. to read from when deoptimizing at runtime) or further updated in abstract super
1928 // value class constructors which could have more fields to be initialized. Note that we do not need to
1929 // initialize the buffer when invoking another constructor in the same class on a larval receiver because we
1930 // have not initialized any fields, yet (this is done completely by the other constructor call).
1931 if (inline_type->is_larval() && !is_larval_receiver_on_super_constructor) {
1932 must_init_buffer = false;
1933 }
1934 arg = inline_type->buffer(this, true, must_init_buffer);
1935 }
1936 if (t != Type::HALF) {
1937 arg_num++;
1938 }
1939 call->init_req(idx++, arg);
1940 }
1941 }
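// Illustrative sketch of the scalarized convention (hypothetical example, not
// from this source): for a declared parameter of type
//   value class Point { int x; int y; }
// pass_fields() above fills the call's inputs with the individual field values
// x and y instead of a single oop (plus bookkeeping inputs such as a null
// marker when the type is nullable). Since the machine-level signature no
// longer matches the symbolic one, the Method* is attached to the call site,
// as noted above.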
1942
1943 //---------------------------set_edges_for_java_call---------------------------
1944 // Connect a newly created call into the current JVMS.
1945 // The return value node (if any) is recovered separately, via set_results_for_java_call.
1946 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1947
1948 // Add the predefined inputs:
1949 call->init_req( TypeFunc::Control, control() );
1950 call->init_req( TypeFunc::I_O , i_o() );
1951 call->init_req( TypeFunc::Memory , reset_memory() );
1952 call->init_req( TypeFunc::FramePtr, frameptr() );
1953 call->init_req( TypeFunc::ReturnAdr, top() );
1954
1955 add_safepoint_edges(call, must_throw);
1956
1957 Node* xcall = _gvn.transform(call);
1958
1959 if (xcall == top()) {
1960 set_control(top());
1961 return;
1962 }
1963 assert(xcall == call, "call identity is stable");
1964
1965 // Re-use the current map to produce the result.
1966
1967 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1968 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1969 set_all_memory_call(xcall, separate_io_proj);
1970
1971 //return xcall; // no need, caller already has it
1972 }
1973
1974 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1975 if (stopped()) return top(); // maybe the call folded up?
1976
1977 // Note: Since any out-of-line call can produce an exception,
1978 // we always insert an I_O projection from the call into the result.
1979
1980 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1981
1982 if (separate_io_proj) {
1983 // The caller requested separate projections be used by the fall
1984 // through and exceptional paths, so replace the projections for
1985 // the fall through path.
1986 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1987 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1988 }
1989
1990 // Capture the return value, if any.
1991 Node* ret;
1992 if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) {
1993 ret = top();
1994 } else if (call->tf()->returns_inline_type_as_fields()) {
1995 // Return of multiple values (inline type fields): we create an
1996 // InlineTypeNode; each field is a projection from the call.
1997 ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
1998 uint base_input = TypeFunc::Parms;
1999 ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, false);
2000 } else {
2001 ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
2002 ciType* t = call->method()->return_type();
2003 if (t->is_klass()) {
2004 const Type* type = TypeOopPtr::make_from_klass(t->as_klass());
2005 if (type->is_inlinetypeptr()) {
2006 ret = InlineTypeNode::make_from_oop(this, ret, type->inline_klass(), type->inline_klass()->is_null_free());
2007 }
2008 }
2009 }
2010
2011 // We just called the constructor on a value type receiver. Reload it from the buffer
2012 ciMethod* method = call->method();
2013 if (method->is_object_constructor() && !method->holder()->is_java_lang_Object()) {
2014 InlineTypeNode* inline_type_receiver = call->in(TypeFunc::Parms)->isa_InlineType();
2015 if (inline_type_receiver != nullptr) {
2016 assert(inline_type_receiver->is_larval(), "must be larval");
2017 assert(inline_type_receiver->is_allocated(&gvn()), "larval must be buffered");
2018 InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, inline_type_receiver->get_oop(),
2019 inline_type_receiver->bottom_type()->inline_klass(), true);
2020 assert(!reloaded->is_larval(), "should not be larval anymore");
2021 replace_in_map(inline_type_receiver, reloaded);
2022 }
2023 }
2024
2025 return ret;
2026 }
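// Illustrative sketch (hypothetical example, not from this source): if the
// callee returns value class Complex { double re; double im; } as fields,
// make_from_multi() above reassembles an InlineTypeNode whose field values
// are individual projections of the call starting at TypeFunc::Parms, instead
// of the single oop projection used in the ordinary case.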
2027
2028 //--------------------set_predefined_input_for_runtime_call--------------------
2029 // Reading and setting the memory state is way conservative here.
2030 // The real problem is that I am not doing real Type analysis on memory,
2031 // so I cannot distinguish card mark stores from other stores. Across a GC
2032 // point the Store Barrier and the card mark memory have to agree. I cannot
2033 // have a card mark store and its barrier split across the GC point from
2034 // either above or below. Here I get that to happen by reading ALL of memory.
2035 // A better answer would be to separate out card marks from other memory.
2036 // For now, return the input memory state, so that it can be reused
2037 // after the call, if this call has restricted memory effects.
2038 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
2039 // Set fixed predefined input arguments
2040 Node* memory = reset_memory();
2041 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
2042 call->init_req( TypeFunc::Control, control() );
2043 call->init_req( TypeFunc::I_O, top() ); // does no i/o
2044 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
2095 if (use->is_MergeMem()) {
2096 wl.push(use);
2097 }
2098 }
2099 }
2100
2101 // Replace the call with the current state of the kit.
2102 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes, bool do_asserts) {
2103 JVMState* ejvms = nullptr;
2104 if (has_exceptions()) {
2105 ejvms = transfer_exceptions_into_jvms();
2106 }
2107
2108 ReplacedNodes replaced_nodes = map()->replaced_nodes();
2109 ReplacedNodes replaced_nodes_exception;
2110 Node* ex_ctl = top();
2111
2112 SafePointNode* final_state = stop();
2113
2114 // Find all the needed outputs of this call
2115 CallProjections* callprojs = call->extract_projections(true, do_asserts);
2116
2117 Unique_Node_List wl;
2118 Node* init_mem = call->in(TypeFunc::Memory);
2119 Node* final_mem = final_state->in(TypeFunc::Memory);
2120 Node* final_ctl = final_state->in(TypeFunc::Control);
2121 Node* final_io = final_state->in(TypeFunc::I_O);
2122
2123 // Replace all the old call edges with the edges from the inlining result
2124 if (callprojs->fallthrough_catchproj != nullptr) {
2125 C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2126 }
2127 if (callprojs->fallthrough_memproj != nullptr) {
2128 if (final_mem->is_MergeMem()) {
2129 // The parser's exit MergeMem was not transformed but may be optimized
2130 final_mem = _gvn.transform(final_mem);
2131 }
2132 C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
2133 add_mergemem_users_to_worklist(wl, final_mem);
2134 }
2135 if (callprojs->fallthrough_ioproj != nullptr) {
2136 C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
2137 }
2138
2139 // Replace the result with the new result if it exists and is used
2140 if (callprojs->resproj[0] != nullptr && result != nullptr) {
2141 // If the inlined code is dead, the result projections for an inline type returned as
2142 // fields have not been replaced. They will go away once the call is replaced by TOP below.
2143 assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
2144 "unexpected number of results");
2145 C->gvn_replace_by(callprojs->resproj[0], result);
2146 }
2147
2148 if (ejvms == nullptr) {
2149 // No exception edges, so simply kill off those paths
2150 if (callprojs->catchall_catchproj != nullptr) {
2151 C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2152 }
2153 if (callprojs->catchall_memproj != nullptr) {
2154 C->gvn_replace_by(callprojs->catchall_memproj, C->top());
2155 }
2156 if (callprojs->catchall_ioproj != nullptr) {
2157 C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
2158 }
2159 // Replace the old exception object with top
2160 if (callprojs->exobj != nullptr) {
2161 C->gvn_replace_by(callprojs->exobj, C->top());
2162 }
2163 } else {
2164 GraphKit ekit(ejvms);
2165
2166 // Load my combined exception state into the kit, with all phis transformed:
2167 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2168 replaced_nodes_exception = ex_map->replaced_nodes();
2169
2170 Node* ex_oop = ekit.use_exception_state(ex_map);
2171
2172 if (callprojs->catchall_catchproj != nullptr) {
2173 C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2174 ex_ctl = ekit.control();
2175 }
2176 if (callprojs->catchall_memproj != nullptr) {
2177 Node* ex_mem = ekit.reset_memory();
2178 C->gvn_replace_by(callprojs->catchall_memproj, ex_mem);
2179 add_mergemem_users_to_worklist(wl, ex_mem);
2180 }
2181 if (callprojs->catchall_ioproj != nullptr) {
2182 C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
2183 }
2184
2185 // Replace the old exception object with the newly created one
2186 if (callprojs->exobj != nullptr) {
2187 C->gvn_replace_by(callprojs->exobj, ex_oop);
2188 }
2189 }
2190
2191 // Disconnect the call from the graph
2192 call->disconnect_inputs(C);
2193 C->gvn_replace_by(call, C->top());
2194
2195 // Clean up any MergeMems that feed other MergeMems since the
2196 // optimizer doesn't like that.
2197 while (wl.size() > 0) {
2198 _gvn.transform(wl.pop());
2199 }
2200
2201 if (callprojs->fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2202 replaced_nodes.apply(C, final_ctl);
2203 }
2204 if (!ex_ctl->is_top() && do_replaced_nodes) {
2205 replaced_nodes_exception.apply(C, ex_ctl);
2206 }
2207 }
2208
2209
2210 //------------------------------increment_counter------------------------------
2211 // for statistics: increment a VM counter by 1
2212
2213 void GraphKit::increment_counter(address counter_addr) {
2214 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2215 increment_counter(adr1);
2216 }
2217
2218 void GraphKit::increment_counter(Node* counter_addr) {
2219 Node* ctrl = control();
2220 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, MemNode::unordered);
2221 Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
2381 *
2382 * @param n node that the type applies to
2383 * @param exact_kls type from profiling
2384 * @param ptr_kind did profiling see null?
2385 *
2386 * @return node with improved type
2387 */
2388 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2389 const Type* current_type = _gvn.type(n);
2390 assert(UseTypeSpeculation, "type speculation must be on");
2391
2392 const TypePtr* speculative = current_type->speculative();
2393
2394 // Should the klass from the profile be recorded in the speculative type?
2395 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2396 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2397 const TypeOopPtr* xtype = tklass->as_instance_type();
2398 assert(xtype->klass_is_exact(), "Should be exact");
2399 // Any reason to believe n is not null (from this profiling or a previous one)?
2400 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2401 const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2402 // record the new speculative type's depth
2403 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2404 speculative = speculative->with_inline_depth(jvms()->depth());
2405 } else if (current_type->would_improve_ptr(ptr_kind)) {
2406 // Profiling reports that null was never seen, so we can change the
2407 // speculative type to a non-null ptr.
2408 if (ptr_kind == ProfileAlwaysNull) {
2409 speculative = TypePtr::NULL_PTR;
2410 } else {
2411 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2412 const TypePtr* ptr = TypePtr::NOTNULL;
2413 if (speculative != nullptr) {
2414 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2415 } else {
2416 speculative = ptr;
2417 }
2418 }
2419 }
2420
2421 if (speculative != current_type->speculative()) {
2422 // Build a type with a speculative type (what we think we know
2423 // about the type but will need a guard when we use it)
2424 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
2425 // We're changing the type, we need a new CheckCast node to carry
2426 // the new type. The new type depends on the control: what
2427 // profiling tells us is only valid from here as far as we can
2428 // tell.
2429 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2430 cast = _gvn.transform(cast);
2431 replace_in_map(n, cast);
2432 n = cast;
2433 }
2434
2435 return n;
2436 }
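// Illustrative example (hypothetical, not from this source): if profiling at
// this bytecode only ever saw java.util.ArrayList and never null, the
// CheckCastPP inserted above narrows n to something like
// "List (speculative=ArrayList:NotNull:exact, inline_depth=d)". The
// speculative part is only a hint: any optimization relying on it must emit a
// guard with an uncommon trap, which is why the cast is pinned on the current
// control.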
2437
2438 /**
2439 * Record profiling data from receiver profiling at an invoke with the
2440 * type system so that it can propagate it (speculation)
2441 *
2442 * @param n receiver node
2443 *
2444 * @return node with improved type
2445 */
2446 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2447 if (!UseTypeSpeculation) {
2448 return n;
2449 }
2450 ciKlass* exact_kls = profile_has_unique_klass();
2451 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2452 if ((java_bc() == Bytecodes::_checkcast ||
2453 java_bc() == Bytecodes::_instanceof ||
2454 java_bc() == Bytecodes::_aastore) &&
2455 method()->method_data()->is_mature()) {
2456 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2457 if (data != nullptr) {
2458 if (java_bc() == Bytecodes::_aastore) {
2459 ciKlass* array_type = nullptr;
2460 ciKlass* element_type = nullptr;
2461 ProfilePtrKind element_ptr = ProfileMaybeNull;
2462 bool flat_array = true;
2463 bool null_free_array = true;
2464 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2465 exact_kls = element_type;
2466 ptr_kind = element_ptr;
2467 } else {
2468 if (!data->as_BitData()->null_seen()) {
2469 ptr_kind = ProfileNeverNull;
2470 } else {
2471 assert(data->is_ReceiverTypeData(), "bad profile data type");
2472 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2473 uint i = 0;
2474 for (; i < call->row_limit(); i++) {
2475 ciKlass* receiver = call->receiver(i);
2476 if (receiver != nullptr) {
2477 break;
2478 }
2479 }
2480 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2481 }
2482 }
2483 }
2484 }
2485 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2486 }
2487
2488 /**
2489 * Record profiling data from argument profiling at an invoke with the
2490 * type system so that it can propagate it (speculation)
2491 *
2492 * @param dest_method target method for the call
2493 * @param bc what invoke bytecode is this?
2494 */
2495 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2496 if (!UseTypeSpeculation) {
2497 return;
2498 }
2499 const TypeFunc* tf = TypeFunc::make(dest_method);
2500 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2501 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2502 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2503 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2504 if (is_reference_type(targ->basic_type())) {
2505 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2506 ciKlass* better_type = nullptr;
2507 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2508 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2509 }
2510 i++;
2511 }
2512 }
2513 }
2514
2515 /**
2516 * Record profiling data from parameter profiling at an invoke with
2517 * the type system so that it can propagate it (speculation)
2518 */
2519 void GraphKit::record_profiled_parameters_for_speculation() {
2520 if (!UseTypeSpeculation) {
2521 return;
2522 }
2523 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2537 * the type system so that it can propagate it (speculation)
2538 */
2539 void GraphKit::record_profiled_return_for_speculation() {
2540 if (!UseTypeSpeculation) {
2541 return;
2542 }
2543 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2544 ciKlass* better_type = nullptr;
2545 if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2546 // If profiling reports a single type for the return value,
2547 // feed it to the type system so it can propagate it as a
2548 // speculative type
2549 record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2550 }
2551 }
2552
2553 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2554 if (Matcher::strict_fp_requires_explicit_rounding) {
2555 // (Note: TypeFunc::make has a cache that makes this fast.)
2556 const TypeFunc* tf = TypeFunc::make(dest_method);
2557 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2558 for (int j = 0; j < nargs; j++) {
2559 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2560 if (targ->basic_type() == T_DOUBLE) {
2561 // If any parameters are doubles, they must be rounded before
2562 // the call; dprecision_rounding does the gvn.transform.
2563 Node *arg = argument(j);
2564 arg = dprecision_rounding(arg);
2565 set_argument(j, arg);
2566 }
2567 }
2568 }
2569 }
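// Background sketch (illustrative, not part of this file): on x87, doubles
// live in 80-bit extended-precision registers, so a Java-visible double is
// only correctly rounded once it has passed through a 64-bit memory slot. The
// rounding node inserted above models roughly:
//
//   double round_to_double(double v) {
//     volatile double slot = v;  // force a 64-bit store/reload
//     return slot;               // now rounded to IEEE double precision
//   }
//
// On targets where strict_fp_requires_explicit_rounding is false, this whole
// pass is a no-op.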
2570
2571 // rounding for strict float precision conformance
2572 Node* GraphKit::precision_rounding(Node* n) {
2573 if (Matcher::strict_fp_requires_explicit_rounding) {
2574 #ifdef IA32
2575 if (UseSSE == 0) {
2576 return _gvn.transform(new RoundFloatNode(nullptr, n));
2577 }
2578 #else
2579 Unimplemented();
2688 // The first null ends the list.
2689 Node* parm0, Node* parm1,
2690 Node* parm2, Node* parm3,
2691 Node* parm4, Node* parm5,
2692 Node* parm6, Node* parm7) {
2693 assert(call_addr != nullptr, "must not call null targets");
2694
2695 // Slow-path call
2696 bool is_leaf = !(flags & RC_NO_LEAF);
2697 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2698 if (call_name == nullptr) {
2699 assert(!is_leaf, "must supply name for leaf");
2700 call_name = OptoRuntime::stub_name(call_addr);
2701 }
2702 CallNode* call;
2703 if (!is_leaf) {
2704 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2705 } else if (flags & RC_NO_FP) {
2706 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2707 } else if (flags & RC_VECTOR){
2708 uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2709 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2710 } else {
2711 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2712 }
2713
2714 // The following is similar to set_edges_for_java_call,
2715 // except that the memory effects of the call are restricted to AliasIdxRaw.
2716
2717 // Slow path call has no side-effects, uses few values
2718 bool wide_in = !(flags & RC_NARROW_MEM);
2719 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2720
2721 Node* prev_mem = nullptr;
2722 if (wide_in) {
2723 prev_mem = set_predefined_input_for_runtime_call(call);
2724 } else {
2725 assert(!wide_out, "narrow in => narrow out");
2726 Node* narrow_mem = memory(adr_type);
2727 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2728 }
2768
2769 if (has_io) {
2770 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2771 }
2772 return call;
2773
2774 }
2775
2776 // i2b
2777 Node* GraphKit::sign_extend_byte(Node* in) {
2778 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2779 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2780 }
2781
2782 // i2s
2783 Node* GraphKit::sign_extend_short(Node* in) {
2784 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2785 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2786 }
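// Standalone model of the two helpers above (a sketch assuming 32-bit ints
// and arithmetic right shifts, as the JVM specifies):
//
//   int32_t i2b(int32_t v) { return (v << 24) >> 24; } // keep low 8 bits, sign-extend
//   int32_t i2s(int32_t v) { return (v << 16) >> 16; } // keep low 16 bits, sign-extend
//
// For example, i2b(0x1FF) == -1 and i2s(0x18000) == -32768.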
2787
2788
2789 //------------------------------merge_memory-----------------------------------
2790 // Merge memory from one path into the current memory state.
2791 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2792 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2793 Node* old_slice = mms.force_memory();
2794 Node* new_slice = mms.memory2();
2795 if (old_slice != new_slice) {
2796 PhiNode* phi;
2797 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2798 if (mms.is_empty()) {
2799 // clone base memory Phi's inputs for this memory slice
2800 assert(old_slice == mms.base_memory(), "sanity");
2801 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2802 _gvn.set_type(phi, Type::MEMORY);
2803 for (uint i = 1; i < phi->req(); i++) {
2804 phi->init_req(i, old_slice->in(i));
2805 }
2806 } else {
2807 phi = old_slice->as_Phi(); // Phi was generated already
2808 }
3071
3072 // Now do a linear scan of the secondary super-klass array. Again, no real
3073 // performance impact (too rare) but it's gotta be done.
3074 // Since the code is rarely used, there is no penalty for moving it
3075 // out of line, and it can only improve I-cache density.
3076 // The decision to inline or out-of-line this final check is platform
3077 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
3078 Node* psc = gvn.transform(
3079 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
3080
3081 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
3082 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
3083 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
3084
3085 // Return false path; set default control to true path.
3086 *ctrl = gvn.transform(r_ok_subtype);
3087 return gvn.transform(r_not_subtype);
3088 }
3089
3090 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
3091 const Type* sub_t = _gvn.type(obj_or_subklass);
3092 if (sub_t->make_oopptr() != nullptr && sub_t->make_oopptr()->is_inlinetypeptr()) {
3093 sub_t = TypeKlassPtr::make(sub_t->inline_klass());
3094 obj_or_subklass = makecon(sub_t);
3095 }
3096 bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
3097 if (expand_subtype_check) {
3098 MergeMemNode* mem = merged_memory();
3099 Node* ctrl = control();
3100 Node* subklass = obj_or_subklass;
3101 if (!sub_t->isa_klassptr()) {
3102 subklass = load_object_klass(obj_or_subklass);
3103 }
3104
3105 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
3106 set_control(ctrl);
3107 return n;
3108 }
3109
3110 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
3111 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
3112 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3113 set_control(_gvn.transform(new IfTrueNode(iff)));
3114 return _gvn.transform(new IfFalseNode(iff));
3115 }
3116
3117 // Profile-driven exact type check:
3118 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
3119 float prob, Node* *casted_receiver) {
3120 assert(!klass->is_interface(), "no exact type check on interfaces");
3121 Node* fail = top();
3122 const Type* rec_t = _gvn.type(receiver);
3123 if (rec_t->is_inlinetypeptr()) {
3124 if (klass->equals(rec_t->inline_klass())) {
3125 (*casted_receiver) = receiver; // Always passes
3126 } else {
3127 (*casted_receiver) = top(); // Always fails
3128 fail = control();
3129 set_control(top());
3130 }
3131 return fail;
3132 }
3133 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
3134 Node* recv_klass = load_object_klass(receiver);
3135 fail = type_check(recv_klass, tklass, prob);
3136
3137 if (!stopped()) {
3138 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3139 const TypeOopPtr* recv_xtype = tklass->as_instance_type();
3140 assert(recv_xtype->klass_is_exact(), "");
3141
3142 if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
3143 // Subsume downstream occurrences of receiver with a cast to
3144 // recv_xtype, since now we know what the type will be.
3145 Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
3146 Node* res = _gvn.transform(cast);
3147 if (recv_xtype->is_inlinetypeptr()) {
3148 assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
3149 res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass());
3150 }
3151 (*casted_receiver) = res;
3152 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
3153 // (User must make the replace_in_map call.)
3154 }
3155 }
3156
3157 return fail;
3158 }
3159
3160 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
3161 float prob) {
3162 Node* want_klass = makecon(tklass);
3163 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
3164 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3165 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
3166 set_control(_gvn.transform(new IfTrueNode (iff)));
3167 Node* fail = _gvn.transform(new IfFalseNode(iff));
3168 return fail;
3169 }
3170
3171 //------------------------------subtype_check_receiver-------------------------
3172 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3173 Node** casted_receiver) {
3174 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
3175 Node* want_klass = makecon(tklass);
3176
3177 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3178
3179 // Ignore interface type information until interface types are properly tracked.
3180 if (!stopped() && !klass->is_interface()) {
3181 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3182 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3183 if (receiver_type != nullptr && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3184 Node* cast = _gvn.transform(new CheckCastPPNode(control(), receiver, recv_type));
3185 if (recv_type->is_inlinetypeptr()) {
3186 cast = InlineTypeNode::make_from_oop(this, cast, recv_type->inline_klass());
3187 }
3188 (*casted_receiver) = cast;
3189 }
3190 }
3191
3192 return slow_ctl;
3193 }
3194
3195 //------------------------------seems_never_null-------------------------------
3196 // Use null_seen information if it is available from the profile.
3197 // If we see an unexpected null at a type check we record it and force a
3198 // recompile; the offending check will be recompiled to handle nulls.
3199 // If we see several offending BCIs, then all checks in the
3200 // method will be recompiled.
3201 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3202 speculating = !_gvn.type(obj)->speculative_maybe_null();
3203 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3204 if (UncommonNullCast // Cutout for this technique
3205 && obj != null() // And not the -Xcomp stupid case?
3206 && !too_many_traps(reason)
3207 ) {
3208 if (speculating) {
3277
3278 //------------------------maybe_cast_profiled_receiver-------------------------
3279 // If the profile has seen exactly one type, narrow to exactly that type.
3280 // Subsequent type checks will always fold up.
3281 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3282 const TypeKlassPtr* require_klass,
3283 ciKlass* spec_klass,
3284 bool safe_for_replace) {
3285 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3286
3287 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3288
3289 // Make sure we haven't already deoptimized from this tactic.
3290 if (too_many_traps_or_recompiles(reason))
3291 return nullptr;
3292
3293 // (No, this isn't a call, but it's enough like a virtual call
3294 // to use the same ciMethod accessor to get the profile info...)
3295 // If we have a speculative type use it instead of profiling (which
3296 // may not help us)
3297 ciKlass* exact_kls = spec_klass;
3298 if (exact_kls == nullptr) {
3299 if (java_bc() == Bytecodes::_aastore) {
3300 ciKlass* array_type = nullptr;
3301 ciKlass* element_type = nullptr;
3302 ProfilePtrKind element_ptr = ProfileMaybeNull;
3303 bool flat_array = true;
3304 bool null_free_array = true;
3305 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3306 exact_kls = element_type;
3307 } else {
3308 exact_kls = profile_has_unique_klass();
3309 }
3310 }
3311 if (exact_kls != nullptr) { // no cast failures here
3312 if (require_klass == nullptr ||
3313 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3314 // If we narrow the type to match what the type profile sees or
3315 // the speculative type, we can then remove the rest of the
3316 // cast.
3317 // This is a win, even if the exact_kls is very specific,
3318 // because downstream operations, such as method calls,
3319 // will often benefit from the sharper type.
3320 Node* exact_obj = not_null_obj; // will get updated in place...
3321 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3322 &exact_obj);
3323 { PreserveJVMState pjvms(this);
3324 set_control(slow_ctl);
3325 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3326 }
3327 if (safe_for_replace) {
3328 replace_in_map(not_null_obj, exact_obj);
3329 }
3330 return exact_obj;
3420 // If not_null_obj is dead, only null-path is taken
3421 if (stopped()) { // Doing instance-of on a null?
3422 set_control(null_ctl);
3423 return intcon(0);
3424 }
3425 region->init_req(_null_path, null_ctl);
3426 phi ->init_req(_null_path, intcon(0)); // Set null path value
3427 if (null_ctl == top()) {
3428 // Do this eagerly, so that pattern matches like is_diamond_phi
3429 // will work even during parsing.
3430 assert(_null_path == PATH_LIMIT-1, "delete last");
3431 region->del_req(_null_path);
3432 phi ->del_req(_null_path);
3433 }
3434
3435 // Do we know the type check always succeeds?
3436 bool known_statically = false;
3437 if (_gvn.type(superklass)->singleton()) {
3438 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3439 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3440 if (subk != nullptr && subk->is_loaded()) {
3441 int static_res = C->static_subtype_check(superk, subk);
3442 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3443 }
3444 }
3445
3446 if (!known_statically) {
3447 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3448 // We may not have profiling here or it may not help us. If we
3449 // have a speculative type use it to perform an exact cast.
3450 ciKlass* spec_obj_type = obj_type->speculative_type();
3451 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3452 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3453 if (stopped()) { // Profile disagrees with this path.
3454 set_control(null_ctl); // Null is the only remaining possibility.
3455 return intcon(0);
3456 }
3457 if (cast_obj != nullptr) {
3458 not_null_obj = cast_obj;
3459 }
3460 }
3476 record_for_igvn(region);
3477
3478 // If we know the type check always succeeds then we don't use the
3479 // profiling data at this bytecode. Don't lose it, feed it to the
3480 // type system as a speculative type.
3481 if (safe_for_replace) {
3482 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3483 replace_in_map(obj, casted_obj);
3484 }
3485
3486 return _gvn.transform(phi);
3487 }
3488
3489 //-------------------------------gen_checkcast---------------------------------
3490 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3491 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3492 // uncommon-trap paths work. Adjust stack after this call.
3493 // If failure_control is supplied and not null, it is filled in with
3494 // the control edge for the cast failure. Otherwise, an appropriate
3495 // uncommon trap or exception is thrown.
3496 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {
3497 kill_dead_locals(); // Benefit all the uncommon traps
3498 const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
3499 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
3500 const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
3501 bool safe_for_replace = (failure_control == nullptr);
3502 assert(!null_free || toop->can_be_inline_type(), "must be an inline type pointer");
3503
3504 // Fast cutout: Check the case that the cast is vacuously true.
3505 // This detects the common cases where the test will short-circuit
3506 // away completely. We do this before we perform the null check,
3507 // because if the test is going to turn into zero code, we don't
3508 // want a residual null check left around. (Causes a slowdown,
3509 // for example, in some objArray manipulations, such as a[i]=a[j].)
3510 if (improved_klass_ptr_type->singleton()) {
3511 const TypeKlassPtr* kptr = nullptr;
3512 const Type* t = _gvn.type(obj);
3513 if (t->isa_oop_ptr()) {
3514 kptr = t->is_oopptr()->as_klass_type();
3515 } else if (obj->is_InlineType()) {
3516 ciInlineKlass* vk = t->inline_klass();
3517 kptr = TypeInstKlassPtr::make(TypePtr::NotNull, vk, Type::Offset(0));
3518 }
3519 if (kptr != nullptr) {
3520 switch (C->static_subtype_check(improved_klass_ptr_type, kptr)) {
3521 case Compile::SSC_always_true:
3522 // If we know the type check always succeeds then we don't use
3523 // the profiling data at this bytecode. Don't lose it, feed it
3524 // to the type system as a speculative type.
3525 obj = record_profiled_receiver_for_speculation(obj);
3526 if (null_free) {
3527 assert(safe_for_replace, "must be");
3528 obj = null_check(obj);
3529 }
3530 assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineType(), "should have been scalarized");
3531 return obj;
3532 case Compile::SSC_always_false:
3533 if (null_free) {
3534 assert(safe_for_replace, "must be");
3535 obj = null_check(obj);
3536 }
3537 // It needs a null check because a null will *pass* the cast check.
3538 if (t->isa_oopptr() != nullptr && !t->is_oopptr()->maybe_null()) {
3539 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3540 Deoptimization::DeoptReason reason = is_aastore ?
3541 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3542 builtin_throw(reason);
3543 return top();
3544 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3545 return null_assert(obj);
3546 }
3547 break; // Fall through to full check
3548 default:
3549 break;
3550 }
3551 }
3552 }
3553
3554 ciProfileData* data = nullptr;
3555 if (failure_control == nullptr) { // use MDO in regular case only
3556 assert(java_bc() == Bytecodes::_aastore ||
3557 java_bc() == Bytecodes::_checkcast,
3558 "interpreter profiles type checks only for these BCs");
3559 if (method()->method_data()->is_mature()) {
3560 data = method()->method_data()->bci_to_data(bci());
3561 }
3562 }
3563
3564 // Make the merge point
3565 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3566 RegionNode* region = new RegionNode(PATH_LIMIT);
3567 Node* phi = new PhiNode(region, toop);
3568 _gvn.set_type(region, Type::CONTROL);
3569 _gvn.set_type(phi, toop);
3570
3571 C->set_has_split_ifs(true); // Has chance for split-if optimization
3572
3573 // Use null-cast information if it is available
3574 bool speculative_not_null = false;
3575 bool never_see_null = ((failure_control == nullptr) // regular case only
3576 && seems_never_null(obj, data, speculative_not_null));
3577
3578 if (obj->is_InlineType()) {
3579 // Re-execute if buffering triggers deoptimization
3580 PreserveReexecuteState preexecs(this);
3581 jvms()->set_should_reexecute(true);
3582 obj = obj->as_InlineType()->buffer(this, safe_for_replace);
3583 }
3584
3586 // Null check; get casted pointer; set the region's null-path slot
3586 Node* null_ctl = top();
3587 Node* not_null_obj = nullptr;
3588 if (null_free) {
3589 assert(safe_for_replace, "must be");
3590 not_null_obj = null_check(obj);
3591 } else {
3592 not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3593 }
3594
3595 // If not_null_obj is dead, only null-path is taken
3596 if (stopped()) { // Doing instance-of on a null?
3597 set_control(null_ctl);
3598 if (toop->is_inlinetypeptr()) {
3599 return InlineTypeNode::make_null(_gvn, toop->inline_klass());
3600 }
3601 return null();
3602 }
3603 region->init_req(_null_path, null_ctl);
3604 phi ->init_req(_null_path, null()); // Set null path value
3605 if (null_ctl == top()) {
3606 // Do this eagerly, so that pattern matches like is_diamond_phi
3607 // will work even during parsing.
3608 assert(_null_path == PATH_LIMIT-1, "delete last");
3609 region->del_req(_null_path);
3610 phi ->del_req(_null_path);
3611 }
3612
3613 Node* cast_obj = nullptr;
3614 if (improved_klass_ptr_type->klass_is_exact()) {
3615 // The following optimization tries to statically cast the speculative type of the object
3616 // (for example obtained during profiling) to the type of the superklass and then do a
3617 // dynamic check that the type of the object is what we expect. To work correctly
3618 // for checkcast and aastore the type of superklass should be exact.
3619 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3620 // We may not have profiling here or it may not help us. If we have
3621 // a speculative type use it to perform an exact cast.
3622 ciKlass* spec_obj_type = obj_type->speculative_type();
3623 if (spec_obj_type != nullptr || data != nullptr) {
3624 cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace);
3625 if (cast_obj != nullptr) {
3626 if (failure_control != nullptr) // failure is now impossible
3627 (*failure_control) = top();
3628 // adjust the type of the phi to the exact klass:
3629 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3630 }
3631 }
3632 }
3633
3634 if (cast_obj == nullptr) {
3635 // Generate the subtype check
3636 Node* improved_superklass = superklass;
3637 if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
3638 // Only improve the superclass for constants, which allows subsequent subtype checks to possibly be commoned up.
3639 // The other non-constant cases cannot be improved with a cast node here since they could be folded to top.
3640 // Additionally, the benefit would only be minor in non-constant cases.
3641 improved_superklass = makecon(improved_klass_ptr_type);
3642 }
3643 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);
3644 // Plug in success path into the merge
3645 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3646 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3647 if (failure_control == nullptr) {
3648 if (not_subtype_ctrl != top()) { // If failure is possible
3649 PreserveJVMState pjvms(this);
3650 set_control(not_subtype_ctrl);
3651 Node* obj_klass = nullptr;
3652 if (not_null_obj->is_InlineType()) {
3653 obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3654 } else {
3655 obj_klass = load_object_klass(not_null_obj);
3656 }
3657 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3658 Deoptimization::DeoptReason reason = is_aastore ?
3659 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3660 builtin_throw(reason);
3661 }
3662 } else {
3663 (*failure_control) = not_subtype_ctrl;
3664 }
3665 }
3666
3667 region->init_req(_obj_path, control());
3668 phi ->init_req(_obj_path, cast_obj);
3669
3670 // A merge of null or Casted-NotNull obj
3671 Node* res = _gvn.transform(phi);
3672
3673 // Note I do NOT always 'replace_in_map(obj,result)' here.
3674 // if( tk->klass()->can_be_primary_super() )
3675 // This means that if I successfully store an Object into an array-of-String
3676 // I 'forget' that the Object is really now known to be a String. I have to
3677 // do this because we don't have true union types for interfaces - if I store
3678 // a Baz into an array-of-Interface and then tell the optimizer it's an
3679 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3680 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3681 // replace_in_map( obj, res );
3682
3683 // Return final merged results
3684 set_control( _gvn.transform(region) );
3685 record_for_igvn(region);
3686
3687 bool not_inline = !toop->can_be_inline_type();
3688 bool not_flat_in_array = !UseArrayFlattening || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flat_in_array());
3689 if (EnableValhalla && not_flat_in_array) {
3690 // Check if obj has been loaded from an array
3691 obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3692 Node* array = nullptr;
3693 if (obj->isa_Load()) {
3694 Node* address = obj->in(MemNode::Address);
3695 if (address->isa_AddP()) {
3696 array = address->as_AddP()->in(AddPNode::Base);
3697 }
3698 } else if (obj->is_Phi()) {
3699 Node* region = obj->in(0);
3700 // TODO make this more robust (see JDK-8231346)
3701 if (region->req() == 3 && region->in(2) != nullptr && region->in(2)->in(0) != nullptr) {
3702 IfNode* iff = region->in(2)->in(0)->isa_If();
3703 if (iff != nullptr) {
3704 iff->is_flat_array_check(&_gvn, &array);
3705 }
3706 }
3707 }
3708 if (array != nullptr) {
3709 const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3710 if (ary_t != nullptr && !ary_t->is_flat()) {
3711 if (!ary_t->is_not_null_free() && not_inline) {
3712 // Casting array element to a non-inline-type, mark array as not null-free.
3713 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3714 replace_in_map(array, cast);
3715 } else if (!ary_t->is_not_flat()) {
3716 // Casting array element to a non-flat type, mark array as not flat.
3717 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3718 replace_in_map(array, cast);
3719 }
3720 }
3721 }
3722 }
3723
3724 if (!stopped() && !res->is_InlineType()) {
3725 res = record_profiled_receiver_for_speculation(res);
3726 if (toop->is_inlinetypeptr()) {
3727 Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
3728 res = vt;
3729 if (safe_for_replace) {
3730 replace_in_map(obj, vt);
3731 replace_in_map(not_null_obj, vt);
3732 replace_in_map(res, vt);
3733 }
3734 }
3735 }
3736 return res;
3737 }
3738
3739 Node* GraphKit::mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock) {
3740 // Load markword
3741 Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3742 Node* mark = make_load(nullptr, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3743 if (check_lock) {
3744 // Check if obj is locked
3745 Node* locked_bit = MakeConX(markWord::unlocked_value);
3746 locked_bit = _gvn.transform(new AndXNode(locked_bit, mark));
3747 Node* cmp = _gvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
3748 Node* is_unlocked = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3749 IfNode* iff = new IfNode(control(), is_unlocked, PROB_MAX, COUNT_UNKNOWN);
3750 _gvn.transform(iff);
3751 Node* locked_region = new RegionNode(3);
3752 Node* mark_phi = new PhiNode(locked_region, TypeX_X);
3753
3754 // Unlocked: Use bits from mark word
3755 locked_region->init_req(1, _gvn.transform(new IfTrueNode(iff)));
3756 mark_phi->init_req(1, mark);
3757
3758 // Locked: Load prototype header from klass
3759 set_control(_gvn.transform(new IfFalseNode(iff)));
3760 // Make loads control dependent to make sure they are only executed if the object is locked
3761 Node* klass_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
3762 Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
3763 Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
3764 Node* proto = _gvn.transform(LoadNode::make(_gvn, control(), C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
3765
3766 locked_region->init_req(2, control());
3767 mark_phi->init_req(2, proto);
3768 set_control(_gvn.transform(locked_region));
3769 record_for_igvn(locked_region);
3770
3771 mark = mark_phi;
3772 }
3773
3774 // Now check if mark word bits are set
3775 Node* mask = MakeConX(mask_val);
3776 Node* masked = _gvn.transform(new AndXNode(_gvn.transform(mark), mask));
3777 record_for_igvn(masked); // Give it a chance to be optimized out by IGVN
3778 Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
3779 return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
3780 }
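// Standalone model of the test built above (illustrative only): the resulting
// Bool answers "are all bits of mask_val set in the effective mark word?"
//
//   bool mark_word_test(uintptr_t mark, uintptr_t mask, bool eq) {
//     uintptr_t masked = mark & mask;
//     return eq ? (masked == mask) : (masked != mask);
//   }
//
// When check_lock is set, the locked path first substitutes the klass's
// prototype header for the displaced mark word, as done in the region/phi
// merge above.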
3781
3782 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
3783 return mark_word_test(obj, markWord::inline_type_pattern, is_inline, /* check_lock = */ false);
3784 }
3785
3786 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
3787 // We can't use immutable memory here because the mark word is mutable.
3788 // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
3789 // check is moved out of loops (mainly to enable loop unswitching).
3790 Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, memory(Compile::AliasIdxRaw), array_or_klass));
3791 record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
3792 return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
3793 }
3794
3795 Node* GraphKit::null_free_array_test(Node* array, bool null_free) {
3796 return mark_word_test(array, markWord::null_free_array_bit_in_place, null_free);
3797 }
3798
3799 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3800 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3801 RegionNode* region = new RegionNode(3);
3802 Node* null_ctl = top();
3803 null_check_oop(val, &null_ctl);
3804 if (null_ctl != top()) {
3805 PreserveJVMState pjvms(this);
3806 set_control(null_ctl);
3807 {
3808 // Deoptimize if null-free array
3809 BuildCutout unless(this, null_free_array_test(ary, /* null_free = */ false), PROB_MAX);
3810 inc_sp(nargs);
3811 uncommon_trap(Deoptimization::Reason_null_check,
3812 Deoptimization::Action_none);
3813 }
3814 region->init_req(1, control());
3815 }
3816 region->init_req(2, control());
3817 set_control(_gvn.transform(region));
3818 record_for_igvn(region);
3819 if (_gvn.type(val) == TypePtr::NULL_PTR) {
3820 // Since we just successfully stored null, the array can't be null-free.
3821 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3822 ary_t = ary_t->cast_to_not_null_free();
3823 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3824 if (safe_for_replace) {
3825 replace_in_map(ary, cast);
3826 }
3827 ary = cast;
3828 }
3829 return ary;
3830 }
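// Illustrative Java-level scenario (hypothetical): for
//   Point[] a = ...; a[i] = maybeNull;
// if 'a' turns out to be a null-free array at runtime and 'maybeNull' is
// null, the cutout above deoptimizes instead of storing. Conversely, once a
// null store is known to succeed, the code above sharpens the array's type to
// "not null-free".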
3831
3832 //------------------------------next_monitor-----------------------------------
3833 // What number should be given to the next monitor?
3834 int GraphKit::next_monitor() {
3835 int current = jvms()->monitor_depth()* C->sync_stack_slots();
3836 int next = current + C->sync_stack_slots();
3837 // Keep the toplevel high water mark current:
3838 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3839 return current;
3840 }
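// Worked example (illustrative): with C->sync_stack_slots() == 2, successive
// monitors receive slot numbers 0, 2, 4, ..., and fixed_slots() is raised to
// cover the newest monitor so the frame reserves space for all of them.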
3841
3842 //------------------------------insert_mem_bar---------------------------------
3843 // Memory barrier to avoid floating things around
3844 // The membar serves as a pinch point between both control and all memory slices.
3845 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3846 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3847 mb->init_req(TypeFunc::Control, control());
3848 mb->init_req(TypeFunc::Memory, reset_memory());
3849 Node* membar = _gvn.transform(mb);
3877 }
3878 Node* membar = _gvn.transform(mb);
3879 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3880 if (alias_idx == Compile::AliasIdxBot) {
3881 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3882 } else {
3883 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3884 }
3885 return membar;
3886 }
3887
3888 //------------------------------shared_lock------------------------------------
3889 // Emit locking code.
3890 FastLockNode* GraphKit::shared_lock(Node* obj) {
3891 // bci is either a monitorenter bc or InvocationEntryBci
3892 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3893 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3894
3895 if( !GenerateSynchronizationCode )
3896 return nullptr; // Not locking things?
3897
3898 if (stopped()) // Dead monitor?
3899 return nullptr;
3900
3901 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3902
3903 // Box the stack location
3904 Node* box = new BoxLockNode(next_monitor());
3905 // Check for bailout after new BoxLockNode
3906 if (failing()) { return nullptr; }
3907 box = _gvn.transform(box);
3908 Node* mem = reset_memory();
3909
3910 FastLockNode * flock = _gvn.transform(new FastLockNode(nullptr, obj, box) )->as_FastLock();
3911
3912 // Add monitor to debug info for the slow path. If we block inside the
3913 // slow path and de-opt, we need the monitor hanging around
3914 map()->push_monitor( flock );
3915
3916 const TypeFunc *tf = LockNode::lock_type();
3917 LockNode *lock = new LockNode(C, tf);
3946 }
3947 #endif
3948
3949 return flock;
3950 }
3951
3952
3953 //------------------------------shared_unlock----------------------------------
3954 // Emit unlocking code.
3955 void GraphKit::shared_unlock(Node* box, Node* obj) {
3956 // bci is either a monitorenter bc or InvocationEntryBci
3957 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3958 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3959
3960 if( !GenerateSynchronizationCode )
3961 return;
3962 if (stopped()) { // Dead monitor?
3963 map()->pop_monitor(); // Kill monitor from debug info
3964 return;
3965 }
3966 assert(!obj->is_InlineType(), "should not unlock on inline type");
3967
3968 // Memory barrier to avoid floating things down past the locked region
3969 insert_mem_bar(Op_MemBarReleaseLock);
3970
3971 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3972 UnlockNode *unlock = new UnlockNode(C, tf);
3973 #ifdef ASSERT
3974 unlock->set_dbg_jvms(sync_jvms());
3975 #endif
3976 uint raw_idx = Compile::AliasIdxRaw;
3977 unlock->init_req( TypeFunc::Control, control() );
3978 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3979 unlock->init_req( TypeFunc::I_O , top() ); // does no i/o
3980 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3981 unlock->init_req( TypeFunc::ReturnAdr, top() );
3982
3983 unlock->init_req(TypeFunc::Parms + 0, obj);
3984 unlock->init_req(TypeFunc::Parms + 1, box);
3985 unlock = _gvn.transform(unlock)->as_Unlock();
3986
3987 Node* mem = reset_memory();
3988
3989 // unlock has no side-effects, sets few values
3990 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3991
3992 // Kill monitor from debug info
3993 map()->pop_monitor( );
3994 }
3995
3996 //-------------------------------get_layout_helper-----------------------------
3997 // If the given klass is a constant or known to be an array,
3998 // fetch the constant layout helper value into constant_value
3999 // and return null. Otherwise, load the non-constant
4000 // layout helper value, and return the node which represents it.
4001 // This two-faced routine is useful because allocation sites
4002 // almost always feature constant types.
4003 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
4004 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
4005 if (!StressReflectiveCode && klass_t != nullptr) {
4006 bool xklass = klass_t->klass_is_exact();
4007 bool can_be_flat = false;
4008 const TypeAryPtr* ary_type = klass_t->as_instance_type()->isa_aryptr();
4009 if (UseArrayFlattening && !xklass && ary_type != nullptr && !ary_type->is_null_free()) {
4010 // Don't constant fold if the runtime type might be a flat array but the static type is not.
4011 const TypeOopPtr* elem = ary_type->elem()->make_oopptr();
4012 can_be_flat = ary_type->can_be_inline_array() && (!elem->is_inlinetypeptr() || elem->inline_klass()->flat_in_array());
4013 }
4014 if (!can_be_flat && (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM))) {
4015 jint lhelper;
4016 if (klass_t->is_flat()) {
4017 lhelper = ary_type->flat_layout_helper();
4018 } else if (klass_t->isa_aryklassptr()) {
4019 BasicType elem = ary_type->elem()->array_element_basic_type();
4020 if (is_reference_type(elem, true)) {
4021 elem = T_OBJECT;
4022 }
4023 lhelper = Klass::array_layout_helper(elem);
4024 } else {
4025 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
4026 }
4027 if (lhelper != Klass::_lh_neutral_value) {
4028 constant_value = lhelper;
4029 return (Node*) nullptr;
4030 }
4031 }
4032 }
4033 constant_value = Klass::_lh_neutral_value; // put in a known value
4034 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
4035 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
4036 }
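// Background (approximate; see Klass::layout_helper() for the authoritative
// encoding): for instance klasses the layout helper is essentially the
// positive object size in bytes, while array klasses use a negative value
// packing the header size, element basic type and log2 of the element size.
// _lh_neutral_value means "not yet known", which is why the code above falls
// back to a runtime load of the field.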
4037
4038 // We just put in an allocate/initialize with a big raw-memory effect.
4039 // Hook selected additional alias categories on the initialization.
4040 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
4041 MergeMemNode* init_in_merge,
4042 Node* init_out_raw) {
4043 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
4044 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
4045
4046 Node* prevmem = kit.memory(alias_idx);
4047 init_in_merge->set_memory_at(alias_idx, prevmem);
4048 if (init_out_raw != nullptr) {
4049 kit.set_memory(init_out_raw, alias_idx);
4050 }
4051 }
4052
4053 //---------------------------set_output_for_allocation-------------------------
4054 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
4055 const TypeOopPtr* oop_type,
4056 bool deoptimize_on_exception) {
4057 int rawidx = Compile::AliasIdxRaw;
4058 alloc->set_req( TypeFunc::FramePtr, frameptr() );
4059 add_safepoint_edges(alloc);
4060 Node* allocx = _gvn.transform(alloc);
4061 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
4062 // create a separate memory projection for the exception path
4063 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
4064 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
4065
4066 // create a memory projection as for the normal control path
4067 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
4068 set_memory(malloc, rawidx);
4069
4070 // A normal slow-call doesn't change i_o, but an allocation does,
4071 // so we create a separate i_o projection for the normal control path.
4072 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
4073 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
4074
4075 // put in an initialization barrier
4076 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
4077 rawoop)->as_Initialize();
4078 assert(alloc->initialization() == init, "2-way macro link must work");
4079 assert(init ->allocation() == alloc, "2-way macro link must work");
4080 {
4081 // Extract memory strands which may participate in the new object's
4082 // initialization, and source them from the new InitializeNode.
4083 // This will allow us to observe initializations when they occur,
4084 // and link them properly (as a group) to the InitializeNode.
4085 assert(init->in(InitializeNode::Memory) == malloc, "");
4086 MergeMemNode* minit_in = MergeMemNode::make(malloc);
4087 init->set_req(InitializeNode::Memory, minit_in);
4088 record_for_igvn(minit_in); // fold it up later, if possible
4089 _gvn.set_type(minit_in, Type::MEMORY);
4090 Node* minit_out = memory(rawidx);
4091 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
4092 // Add an edge in the MergeMem for the header fields so an access
4093 // to one of those has correct memory state
4094 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
4095 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
4096 if (oop_type->isa_aryptr()) {
4097 const TypeAryPtr* arytype = oop_type->is_aryptr();
4098 if (arytype->is_flat()) {
4099 // Initially all flat array accesses share a single slice
4100 // but that changes after parsing. Prepare the memory graph so
4101 // it can optimize flat array accesses properly once they
4102 // don't share a single slice.
4103 assert(C->flat_accesses_share_alias(), "should be set at parse time");
4104 C->set_flat_accesses_share_alias(false);
4105 ciInlineKlass* vk = arytype->elem()->inline_klass();
4106 for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
4107 ciField* field = vk->nonstatic_field_at(i);
4108 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4109 continue; // do not bother to track really large numbers of fields
4110 int off_in_vt = field->offset_in_bytes() - vk->payload_offset();
4111 const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
4112 int fieldidx = C->get_alias_index(adr_type, true);
4113 // Pass nullptr for init_out. Having per flat array element field memory edges as uses of the Initialize node
4114 // can result in per flat array field Phis to be created which confuses the logic of
4115 // Compile::adjust_flat_array_access_aliases().
4116 hook_memory_on_init(*this, fieldidx, minit_in, nullptr);
4117 }
4118 C->set_flat_accesses_share_alias(true);
4119 hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
4120 } else {
4121 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
4122 int elemidx = C->get_alias_index(telemref);
4123 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
4124 }
4125 } else if (oop_type->isa_instptr()) {
4126 set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
4127 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
4128 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
4129 ciField* field = ik->nonstatic_field_at(i);
4130 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
4131 continue; // do not bother to track really large numbers of fields
4132 // Find (or create) the alias category for this field:
4133 int fieldidx = C->alias_type(field)->index();
4134 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
4135 }
4136 }
4137 }
4138
4139 // Cast raw oop to the real thing...
4140 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
4141 javaoop = _gvn.transform(javaoop);
4142 C->set_recent_alloc(control(), javaoop);
4143 assert(just_allocated_object(control()) == javaoop, "just allocated");
4144
4145 #ifdef ASSERT
4146 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
4157 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
4158 }
4159 }
4160 #endif //ASSERT
4161
4162 return javaoop;
4163 }
4164
//---------------------------new_instance--------------------------------------
// This routine takes a klass_node which may be constant (for a static type)
// or may be non-constant (for reflective code).  It will work equally well
// for either, and the graph will fold nicely if the optimizer later reduces
// the type to a constant.
// The optional arguments are for specialized use by intrinsics:
//  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
//  - If 'return_size_val', report the total object size to the caller.
//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
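//  - inline_type_node, if not null, links the allocation to the inline type
//    value being buffered into the new instance (Valhalla-specific).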
Node* GraphKit::new_instance(Node* klass_node,
                             Node* extra_slow_test,
                             Node* *return_size_val,
                             bool deoptimize_on_exception,
                             InlineTypeNode* inline_type_node) {
  // Compute size in doublewords
  // The size is always an integral number of doublewords, represented
  // as a positive bytewise size stored in the klass's layout_helper.
  // The layout_helper also encodes (in a low bit) the need for a slow path.
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  bool  layout_is_con = (layout_val == nullptr);

  if (extra_slow_test == nullptr)  extra_slow_test = intcon(0);
  // Generate the initial go-slow test.  It's either ALWAYS (return a
  // Node for 1) or NEVER (return a null) or perhaps (in the reflective
  // case) a computed value derived from the layout_helper.
  Node* initial_slow_test = nullptr;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
    initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
  } else {   // reflective case
    // This reflective path is used by Unsafe.allocateInstance.
    // (It may be stress-tested by specifying StressReflectiveCode.)
    // Basically, we want to get into the VM if there's an illegal argument.
    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
    initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
    if (extra_slow_test != intcon(0)) {
      initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
    }
    // (Macro-expander will further convert this to a Bool, if necessary.)
    // ...

    // Clear the low bits to extract layout_helper_size_in_bytes:
    assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
    Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
    size = _gvn.transform( new AndXNode(size, mask) );
  }
  if (return_size_val != nullptr) {
    (*return_size_val) = size;
  }

  // This is a precise notnull oop of the klass.
  // (Actually, it need not be precise if this is a reflective allocation.)
  // It's what we cast the result to.
  const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
  if (!tklass)  tklass = TypeInstKlassPtr::OBJECT;
  const TypeOopPtr* oop_type = tklass->as_instance_type();

  // Now generate allocation code

  // The entire memory state is needed for slow path of the allocation
  // since GC and deoptimization can happen.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                         control(), mem, i_o(),
                                         size, klass_node,
                                         initial_slow_test, inline_type_node);

  return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}

//-------------------------------new_array-------------------------------------
// helper for newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// The optional arguments are for specialized use by intrinsics:
//  - If 'return_size_val', report the non-padded array size (sum of header size
//    and array body) to the caller.
//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                          Node* length,         // number of array elements
                          int   nargs,          // number of arguments to push back for uncommon trap
                          Node* *return_size_val,
                          bool deoptimize_on_exception) {
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  bool  layout_is_con = (layout_val == nullptr);

  if (!layout_is_con && !StressReflectiveCode &&
      !too_many_traps(Deoptimization::Reason_class_check)) {
    // This is a reflective array creation site.
    // Optimistically assume that it is a subtype of Object[],
    // so that we can fold up all the address arithmetic.
    layout_con = Klass::array_layout_helper(T_OBJECT);
    Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
    Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
    { BuildCutout unless(this, bol_lh, PROB_MAX);
      inc_sp(nargs);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
    layout_val = nullptr;
    layout_is_con = true;
  }

  // Generate the initial go-slow test.  Make sure we do not overflow
  // if length is huge (near 2Gig) or negative!  We do not need
  // exact double-words here, just a close approximation of needed
  // double-words.  We can't add any offset or rounding bits, lest we
  // take a size -1 of bytes and make it positive.  Use an unsigned
  // compare, so negative sizes look hugely positive.
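  // (For example, length == -1 reads as 0xFFFFFFFF under the unsigned compare,
  // far above any fast_size_limit, so negative lengths always go slow.)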
  int fast_size_limit = FastAllocateSizeLimit;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    // Increase the size limit if we have exact knowledge of array type.
    int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
    fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
  }

  Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
  Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );

  // --- Size Computation ---
  // array_size = round_to_heap(array_header + (length << elem_shift));
  // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
  // and align_to(x, y) == ((x + y-1) & ~(y-1))
  // The rounding mask is strength-reduced, if possible.
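  // Worked example (assuming a 16-byte header and 8-byte alignment): a byte
  // array of length 5 needs align_to(16 + 5, 8) == (21 + 7) & ~7 == 24 bytes.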
  int round_mask = MinObjAlignmentInBytes - 1;
  Node* header_size = nullptr;
  // (T_BYTE has the weakest alignment and size restrictions...)
  if (layout_is_con) {
    int       hsize  = Klass::layout_helper_header_size(layout_con);
    int       eshift = Klass::layout_helper_log2_element_size(layout_con);
    bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
    if ((round_mask & ~right_n_bits(eshift)) == 0)
      round_mask = 0;  // strength-reduce it if it goes away completely
    assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
    int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    assert(header_size_min <= hsize, "generic minimum is smallest");
    header_size = intcon(hsize);
  } else {
    Node* hss   = intcon(Klass::_lh_header_size_shift);
    Node* hsm   = intcon(Klass::_lh_header_size_mask);
    header_size = _gvn.transform(new URShiftINode(layout_val, hss));
    header_size = _gvn.transform(new AndINode(header_size, hsm));
  }

  Node* elem_shift = nullptr;
  if (layout_is_con) {
    int eshift = Klass::layout_helper_log2_element_size(layout_con);
    if (eshift != 0)
      elem_shift = intcon(eshift);
  } else {
    // There is no need to mask or shift this value.
    // The semantics of LShiftINode include an implicit mask to 0x1F.
    assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
    elem_shift = layout_val;
    // ...
  }
  Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));

  if (return_size_val != nullptr) {
    // This is the size
    (*return_size_val) = non_rounded_size;
  }

  Node* size = non_rounded_size;
  if (round_mask != 0) {
    Node* mask1 = MakeConX(round_mask);
    size = _gvn.transform(new AddXNode(size, mask1));
    Node* mask2 = MakeConX(~round_mask);
    size = _gvn.transform(new AndXNode(size, mask2));
  }
  // else if round_mask == 0, the size computation is self-rounding
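  // (round_mask was zeroed above only when the element stride already covers
  // the alignment and the header is pre-rounded, so the sum is aligned.)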

  // Now generate allocation code

  // The entire memory state is needed for slow path of the allocation
  // since GC and deoptimization can happen.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
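    // (as_int_value() materializes the Bool as 'cond ? 1 : 0' via a CMove.)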
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }

  const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
  const TypeOopPtr* ary_type = ary_klass->as_instance_type();
  const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();

  // Inline type array variants:
  // - null-ok:         ciObjArrayKlass  with is_elem_null_free() = false
  // - null-free:       ciObjArrayKlass  with is_elem_null_free() = true
  // - null-free, flat: ciFlatArrayKlass with is_elem_null_free() = true
  // Check if array is a null-free, non-flat inline type array
  // that needs to be initialized with the default inline type.
  Node* default_value = nullptr;
  Node* raw_default_value = nullptr;
  if (ary_ptr != nullptr && ary_ptr->klass_is_exact()) {
    // Array type is known
    if (ary_ptr->is_null_free() && !ary_ptr->is_flat()) {
      ciInlineKlass* vk = ary_ptr->elem()->inline_klass();
      default_value = InlineTypeNode::default_oop(gvn(), vk);
      if (UseCompressedOops) {
        // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
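        // (the same narrow oop in both halves, so the payload can be
        // initialized one 64-bit word at a time)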
        default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
        Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
        Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
        raw_default_value = _gvn.transform(new OrLNode(lower, upper));
      } else {
        raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
      }
    }
  }

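  // The length must also respect the per-element-type maximum: larger element
  // types permit fewer elements before the maximum object size is reached.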
  Node* valid_length_test = _gvn.intcon(1);
  if (ary_type->isa_aryptr()) {
    BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
    jint max = TypeAryPtr::max_array_length(bt);
    Node* valid_length_cmp  = _gvn.transform(new CmpUNode(length, intcon(max)));
    valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
  }

  // Create the AllocateArrayNode and its result projections
  AllocateArrayNode* alloc
    = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
                            control(), mem, i_o(),
                            size, klass_node,
                            initial_slow_test,
                            length, valid_length_test,
                            default_value, raw_default_value);
  // Cast to correct type.  Note that the klass_node may be constant or not,
  // and in the latter case the actual array type will be inexact also.
  // (This happens via a non-constant argument to inline_native_newArray.)
  // In any case, the value of klass_node provides the desired array type.
  const TypeInt* length_type = _gvn.find_int_type(length);
  if (ary_type->isa_aryptr() && length_type != nullptr) {
    // Try to get a better type than POS for the size
    ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
  }

  Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);

  array_ideal_length(alloc, ary_type, true);
  return javaoop;
}

// The following "Ideal_foo" functions are placed here because they recognize
// the graph shapes created by the functions immediately above.

//---------------------------Ideal_allocation----------------------------------
// ...
  set_all_memory(ideal.merged_memory());
  set_i_o(ideal.i_o());
  set_control(ideal.ctrl());
}

void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}

Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
  Node* len = load_array_length(load_String_value(str, set_ctrl));
  Node* coder = load_String_coder(str, set_ctrl);
  // Divide length by 2 if coder is UTF16
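  // (coder is 0 for LATIN1 and 1 for UTF16, so 'len >> coder' halves the
  // byte-array length exactly when the string is UTF16-encoded)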
  return _gvn.transform(new RShiftINode(len, coder));
}

Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
  int value_offset = java_lang_String::value_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, false, true, true),
                                                  ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
  Node* p = basic_plus_adr(str, str, value_offset);
  Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
                              IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
  return load;
}

Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
  if (!CompactStrings) {
    return intcon(java_lang_String::CODER_UTF16);
  }
  int coder_offset = java_lang_String::coder_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);

  Node* p = basic_plus_adr(str, str, coder_offset);
  Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
                              IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
  return load;
}

void GraphKit::store_String_value(Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* value_field_type = string_type->add_offset(value_offset);

  access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
                  value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
}

void GraphKit::store_String_coder(Node* str, Node* value) {
  int coder_offset = java_lang_String::coder_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);

  access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
                  value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
}

// Capture src and dst memory state with a MergeMemNode
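// This lets a node that reads one slice and writes another (e.g. a string
// copy intrinsic) hang off a single memory input covering both slices.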
Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
  if (src_type == dst_type) {
    // Types are equal, we don't need a MergeMemNode
    return memory(src_type);
  }
  MergeMemNode* merge = MergeMemNode::make(map()->memory());
  record_for_igvn(merge); // fold it up later, if possible
  int src_idx = C->get_alias_index(src_type);
  int dst_idx = C->get_alias_index(dst_type);
  merge->set_memory_at(src_idx, memory(src_idx));
  merge->set_memory_at(dst_idx, memory(dst_idx));
  return merge;
}
// ...
  i_char->init_req(2, AddI(i_char, intcon(2)));

  set_control(IfFalse(iff));
  set_memory(st, TypeAryPtr::BYTES);
}

Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
  if (!field->is_constant()) {
    return nullptr; // Field not marked as constant.
  }
  ciInstance* holder = nullptr;
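  // An instance field can only be folded when the holder object itself is a
  // compile-time constant oop.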
  if (!field->is_static()) {
    ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
    if (const_oop != nullptr && const_oop->is_instance()) {
      holder = const_oop->as_instance();
    }
  }
  const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
                                                        /*is_unsigned_load=*/false);
  if (con_type != nullptr) {
    Node* con = makecon(con_type);
    if (field->type()->is_inlinetype()) {
      con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
    } else if (con_type->is_inlinetypeptr()) {
      con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
    }
    return con;
  }
  return nullptr;
}

//---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* GraphKit::load_mirror_from_klass(Node* klass) {
  Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
  Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
  // mirror = ((OopHandle)mirror)->resolve();
  return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
}

Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
  const Type* obj_type = obj->bottom_type();
  const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
  if (obj_type->isa_oopptr() && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
    const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
    Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
    obj = casted_obj;
  }
  if (sig_type->is_inlinetypeptr()) {
    obj = InlineTypeNode::make_from_oop(this, obj, sig_type->inline_klass(), !gvn().type(obj)->maybe_null());
  }
  return obj;
}