12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/c2/barrierSetC2.hpp"
30 #include "libadt/vectset.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "opto/c2compiler.hpp"
34 #include "opto/arraycopynode.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/cfgnode.hpp"
37 #include "opto/compile.hpp"
38 #include "opto/escape.hpp"
39 #include "opto/macro.hpp"
40 #include "opto/phaseX.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/rootnode.hpp"
43 #include "utilities/macros.hpp"
44
45 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
46 // If ReduceAllocationMerges is enabled we might call split_through_phi during
47 // split_unique_types and that will create additional nodes that need to be
48 // pushed to the ConnectionGraph. The code below bumps the initial capacity of
49 // _nodes by 10% to account for these additional nodes. If capacity is exceeded
50 // the array will be reallocated.
51 _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
144 GrowableArray<SafePointNode*> sfn_worklist;
145 GrowableArray<MergeMemNode*> mergemem_worklist;
146 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
147
148 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
149
150 // 1. Populate Connection Graph (CG) with PointsTo nodes.
151 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
152 // Initialize worklist
153 if (C->root() != nullptr) {
154 ideal_nodes.push(C->root());
155 }
156 // Processed ideal nodes are unique on ideal_nodes list
157 // but several ideal nodes are mapped to the phantom_obj.
158 // To avoid duplicated entries on the following worklists
159 // add the phantom_obj only once to them.
160 ptnodes_worklist.append(phantom_obj);
161 java_objects_worklist.append(phantom_obj);
162   for (uint next = 0; next < ideal_nodes.size(); ++next) {
163 Node* n = ideal_nodes.at(next);
164 // Create PointsTo nodes and add them to Connection Graph. Called
165 // only once per ideal node since ideal_nodes is Unique_Node list.
166 add_node_to_connection_graph(n, &delayed_worklist);
167 PointsToNode* ptn = ptnode_adr(n->_idx);
168 if (ptn != nullptr && ptn != phantom_obj) {
169 ptnodes_worklist.append(ptn);
170 if (ptn->is_JavaObject()) {
171 java_objects_worklist.append(ptn->as_JavaObject());
172 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
173 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
174           // Only the results of allocations and Java static calls are interesting.
175 non_escaped_allocs_worklist.append(ptn->as_JavaObject());
176 }
177 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
178 oop_fields_worklist.append(ptn->as_Field());
179 }
180 }
181 // Collect some interesting nodes for further use.
182 switch (n->Opcode()) {
183 case Op_MergeMem:
676
677 // The next two inputs are:
678 // (1) A copy of the original pointer to NSR objects.
679 // (2) A selector, used to decide if we need to rematerialize an object
680 // or use the pointer to a NSR object.
681 // See more details of these fields in the declaration of SafePointScalarMergeNode
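  // Illustrative Java sketch (hypothetical names, not from this file) of a
  // merge of allocations reaching a safepoint:
  //
  //   Point p = cond ? new Point(1, 2)  // scalar-replaceable input
  //                  : globalPoint;     // NSR input, kept as a pointer
  //   ...                               // safepoint: debug info references p
  //
  // The original pointer input covers the NSR case; the selector tells
  // deoptimization whether to rematerialize an object or reuse that pointer.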
682 sfpt->add_req(ophi);
683 sfpt->add_req(selector);
684
685 for (uint i = 1; i < ophi->req(); i++) {
686 Node* base = ophi->in(i);
687 JavaObjectNode* ptn = unique_java_object(base);
688
689 // If the base is not scalar replaceable we don't need to register information about
690 // it at this time.
691 if (ptn == nullptr || !ptn->scalar_replaceable()) {
692 continue;
693 }
694
695 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
696 SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
697 if (sobj == nullptr) {
698 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
699 return;
700 }
701
702 // Now make a pass over the debug information replacing any references
703 // to the allocated object with "sobj"
704 Node* ccpp = alloc->result_cast();
705 sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
706
707 // Register the scalarized object as a candidate for reallocation
708 smerge->add_req(sobj);
709 }
710
711 // Replaces debug information references to "ophi" in "sfpt" with references to "smerge"
712 sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);
713
714 // The call to 'replace_edges_in_range' above might have removed the
715   // reference to ophi that we need at _merge_pointer_idx. The code below
716   // makes sure the reference is maintained.
840 return false;
841 }
842
843 // Returns true if at least one of the arguments to the call is an object
844 // that does not escape globally.
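// For example (illustrative Java, hypothetical names):
//
//   static void consume(Object o) { /* ... */ }
//   static void caller() {
//     Object local = new Object();   // does not escape globally by itself
//     consume(local);                // has_arg_escape(call) returns true
//   }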
845 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
846 if (call->method() != nullptr) {
847 uint max_idx = TypeFunc::Parms + call->method()->arg_size();
848 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
849 Node* p = call->in(idx);
850 if (not_global_escape(p)) {
851 return true;
852 }
853 }
854 } else {
855 const char* name = call->as_CallStaticJava()->_name;
856 assert(name != nullptr, "no name");
857 // no arg escapes through uncommon traps
858 if (strcmp(name, "uncommon_trap") != 0) {
859 // process_call_arguments() assumes that all arguments escape globally
860 const TypeTuple* d = call->tf()->domain();
861 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
862 const Type* at = d->field_at(i);
863 if (at->isa_oopptr() != nullptr) {
864 return true;
865 }
866 }
867 }
868 }
869 return false;
870 }
871
872
873
874 // Utility function for nodes that load an object
875 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
876 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
877 // ThreadLocal has RawPtr type.
878 const Type* t = _igvn->type(n);
879 if (t->make_ptr() != nullptr) {
880 Node* adr = n->in(MemNode::Address);
914 // first IGVN optimization when escape information is still available.
915 record_for_optimizer(n);
916 } else if (n->is_Allocate()) {
917 add_call_node(n->as_Call());
918 record_for_optimizer(n);
919 } else {
920 if (n->is_CallStaticJava()) {
921 const char* name = n->as_CallStaticJava()->_name;
922 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
923 return; // Skip uncommon traps
924 }
925 }
926 // Don't mark as processed since call's arguments have to be processed.
927 delayed_worklist->push(n);
928 // Check if a call returns an object.
929 if ((n->as_Call()->returns_pointer() &&
930 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
931 (n->is_CallStaticJava() &&
932 n->as_CallStaticJava()->is_boxing_method())) {
933 add_call_node(n->as_Call());
934 }
935 }
936 return;
937 }
938 // Put this check here to process call arguments since some call nodes
939 // point to phantom_obj.
940 if (n_ptn == phantom_obj || n_ptn == null_obj) {
941 return; // Skip predefined nodes.
942 }
943 switch (opcode) {
944 case Op_AddP: {
945 Node* base = get_addp_base(n);
946 PointsToNode* ptn_base = ptnode_adr(base->_idx);
947 // Field nodes are created for all field types. They are used in
948 // adjust_scalar_replaceable_state() and split_unique_types().
949 // Note, non-oop fields will have only base edges in Connection
950 // Graph because such fields are not used for oop loads and stores.
951 int offset = address_offset(n, igvn);
952 add_field(n, PointsToNode::NoEscape, offset);
953 if (ptn_base == nullptr) {
954 delayed_worklist->push(n); // Process it later.
955 } else {
956 n_ptn = ptnode_adr(n_idx);
957 add_base(n_ptn->as_Field(), ptn_base);
958 }
959 break;
960 }
961 case Op_CastX2P: {
962 map_ideal_node(n, phantom_obj);
963 break;
964 }
965 case Op_CastPP:
966 case Op_CheckCastPP:
967 case Op_EncodeP:
968 case Op_DecodeN:
969 case Op_EncodePKlass:
970 case Op_DecodeNKlass: {
971 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
972 break;
973 }
974 case Op_CMoveP: {
975 add_local_var(n, PointsToNode::NoEscape);
976       // Do not add edges during the first iteration because some inputs
977       // may not be defined yet.
978 delayed_worklist->push(n);
979 break;
980 }
981 case Op_ConP:
982 case Op_ConN:
983 case Op_ConNKlass: {
984 // assume all oop constants globally escape except for null
1016 case Op_PartialSubtypeCheck: {
1017      // Produces Null or notNull and is used only in CmpP, so
1018 // phantom_obj could be used.
1019 map_ideal_node(n, phantom_obj); // Result is unknown
1020 break;
1021 }
1022 case Op_Phi: {
1023 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1024 // ThreadLocal has RawPtr type.
1025 const Type* t = n->as_Phi()->type();
1026 if (t->make_ptr() != nullptr) {
1027 add_local_var(n, PointsToNode::NoEscape);
1028        // Do not add edges during the first iteration because some inputs
1029        // may not be defined yet.
1030 delayed_worklist->push(n);
1031 }
1032 break;
1033 }
1034 case Op_Proj: {
1035 // we are only interested in the oop result projection from a call
1036 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1037 n->in(0)->as_Call()->returns_pointer()) {
1038 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1039 }
1040 break;
1041 }
1042 case Op_Rethrow: // Exception object escapes
1043 case Op_Return: {
1044 if (n->req() > TypeFunc::Parms &&
1045 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1046 // Treat Return value as LocalVar with GlobalEscape escape state.
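          // For example (illustrative Java): a returned allocation escapes
          // to the caller and so escapes this compilation unit:
          //
          //   static Object make() {
          //     return new Object();   // Return input -> GlobalEscape
          //   }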
1047 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1048 }
1049 break;
1050 }
1051 case Op_CompareAndExchangeP:
1052 case Op_CompareAndExchangeN:
1053 case Op_GetAndSetP:
1054 case Op_GetAndSetN: {
1055 add_objload_to_connection_graph(n, delayed_worklist);
1056 // fall-through
1057 }
1119 if (n->is_Call()) {
1120 process_call_arguments(n->as_Call());
1121 return;
1122 }
1123 assert(n->is_Store() || n->is_LoadStore() ||
1124 (n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr),
1125 "node should be registered already");
1126 int opcode = n->Opcode();
1127 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1128 if (gc_handled) {
1129 return; // Ignore node if already handled by GC.
1130 }
1131 switch (opcode) {
1132 case Op_AddP: {
1133 Node* base = get_addp_base(n);
1134 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1135 assert(ptn_base != nullptr, "field's base should be registered");
1136 add_base(n_ptn->as_Field(), ptn_base);
1137 break;
1138 }
1139 case Op_CastPP:
1140 case Op_CheckCastPP:
1141 case Op_EncodeP:
1142 case Op_DecodeN:
1143 case Op_EncodePKlass:
1144 case Op_DecodeNKlass: {
1145 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1146 break;
1147 }
1148 case Op_CMoveP: {
1149 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1150 Node* in = n->in(i);
1151 if (in == nullptr) {
1152 continue; // ignore null
1153 }
1154 Node* uncast_in = in->uncast();
1155 if (uncast_in->is_top() || uncast_in == n) {
1156 continue; // ignore top or inputs which go back this node
1157 }
1158 PointsToNode* ptn = ptnode_adr(in->_idx);
1173 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1174 // ThreadLocal has RawPtr type.
1175 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1176 for (uint i = 1; i < n->req(); i++) {
1177 Node* in = n->in(i);
1178 if (in == nullptr) {
1179 continue; // ignore null
1180 }
1181 Node* uncast_in = in->uncast();
1182 if (uncast_in->is_top() || uncast_in == n) {
1183 continue; // ignore top or inputs which go back this node
1184 }
1185 PointsToNode* ptn = ptnode_adr(in->_idx);
1186 assert(ptn != nullptr, "node should be registered");
1187 add_edge(n_ptn, ptn);
1188 }
1189 break;
1190 }
1191 case Op_Proj: {
1192 // we are only interested in the oop result projection from a call
1193 assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1194 n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1195 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1196 break;
1197 }
1198 case Op_Rethrow: // Exception object escapes
1199 case Op_Return: {
1200 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1201 "Unexpected node type");
1202 // Treat Return value as LocalVar with GlobalEscape escape state.
1203 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1204 break;
1205 }
1206 case Op_CompareAndExchangeP:
1207 case Op_CompareAndExchangeN:
1208 case Op_GetAndSetP:
1209 case Op_GetAndSetN:{
1210 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1211 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1212 // fall-through
1213 }
1214 case Op_CompareAndSwapP:
1350 PointsToNode* ptn = ptnode_adr(val->_idx);
1351 assert(ptn != nullptr, "node should be registered");
1352 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1353 // Add edge to object for unsafe access with offset.
1354 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1355 assert(adr_ptn != nullptr, "node should be registered");
1356 if (adr_ptn->is_Field()) {
1357 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1358 add_edge(adr_ptn, ptn);
1359 }
1360 return true;
1361 }
1362 #ifdef ASSERT
1363 n->dump(1);
1364 assert(false, "not unsafe");
1365 #endif
1366 return false;
1367 }
1368
1369 void ConnectionGraph::add_call_node(CallNode* call) {
1370 assert(call->returns_pointer(), "only for call which returns pointer");
1371 uint call_idx = call->_idx;
1372 if (call->is_Allocate()) {
1373 Node* k = call->in(AllocateNode::KlassNode);
1374 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1375 assert(kt != nullptr, "TypeKlassPtr required.");
1376 PointsToNode::EscapeState es = PointsToNode::NoEscape;
1377 bool scalar_replaceable = true;
1378 NOT_PRODUCT(const char* nsr_reason = "");
1379 if (call->is_AllocateArray()) {
1380 if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1381 es = PointsToNode::GlobalEscape;
1382 } else {
1383 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1384 if (length < 0) {
1385 // Not scalar replaceable if the length is not constant.
1386 scalar_replaceable = false;
1387 NOT_PRODUCT(nsr_reason = "has a non-constant length");
1388 } else if (length > EliminateAllocationArraySizeLimit) {
1389 // Not scalar replaceable if the length is too big.
1390 scalar_replaceable = false;
1426 //
1427 //   - all oop arguments escape globally;
1428 //
1429 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1430 //
1431 //   - the same as CallDynamicJavaNode if bytecode analysis cannot be done;
1432 //
1433 //   - mapped to GlobalEscape JavaObject node if an unknown oop is returned;
1434 //   - mapped to NoEscape JavaObject node if a non-escaping object allocated
1435 //     during the call is returned;
1436 //   - mapped to ArgEscape LocalVar node pointing to object arguments
1437 //     which are returned and do not escape during the call;
1438 //
1439 //   - the escaping status of oop arguments is defined by bytecode analysis;
1440 //
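// Illustrative Java sketch (hypothetical names, not from this file):
//
//   Object o1 = factory.make();   // CallDynamicJava: unknown callee, the
//                                 // result is mapped to GlobalEscape
//   Object o2 = Helper.make();    // CallStaticJava: bytecode analysis may
//                                 // prove the result is a fresh allocation
//                                 // that does not escape (NoEscape)
//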
1441 // For a static call, we know exactly what method is being called.
1442 // Use bytecode estimator to record whether the call's return value escapes.
1443 ciMethod* meth = call->as_CallJava()->method();
1444 if (meth == nullptr) {
1445 const char* name = call->as_CallStaticJava()->_name;
1446 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
1447 // Returns a newly allocated non-escaped object.
1448 add_java_object(call, PointsToNode::NoEscape);
1449 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
1450 } else if (meth->is_boxing_method()) {
1451 // Returns boxing object
1452 PointsToNode::EscapeState es;
1453 vmIntrinsics::ID intr = meth->intrinsic_id();
1454 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1455        // It does not escape if the object is always newly allocated.
1456 es = PointsToNode::NoEscape;
1457 } else {
1458        // It escapes globally if the object could be loaded from the boxing cache.
1459 es = PointsToNode::GlobalEscape;
1460 }
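      // For example (illustrative Java):
      //
      //   Integer i = Integer.valueOf(42);   // may return a cached object,
      //                                      // so the result is GlobalEscape
      //   Float f = Float.valueOf(1.0f);     // always allocates a fresh
      //                                      // object, so NoEscape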
1461 add_java_object(call, es);
1462 if (es == PointsToNode::GlobalEscape) {
1463 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
1464 }
1465 } else {
1466 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1467 call_analyzer->copy_dependencies(_compile->dependencies());
1468 if (call_analyzer->is_return_allocated()) {
1469 // Returns a newly allocated non-escaped object, simply
1470 // update dependency information.
1471 // Mark it as NoEscape so that objects referenced by
1472        // its fields will be marked as NoEscape at least.
1473 add_java_object(call, PointsToNode::NoEscape);
1474 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1475 } else {
1476 // Determine whether any arguments are returned.
1477 const TypeTuple* d = call->tf()->domain();
1478 bool ret_arg = false;
1479 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1480 if (d->field_at(i)->isa_ptr() != nullptr &&
1481 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1482 ret_arg = true;
1483 break;
1484 }
1485 }
1486 if (ret_arg) {
1487 add_local_var(call, PointsToNode::ArgEscape);
1488 } else {
1489 // Returns unknown object.
1490 map_ideal_node(call, phantom_obj);
1491 }
1492 }
1493 }
1494 } else {
1495    // Another type of call, assume the worst case:
1496    // the returned value is unknown and globally escapes.
1497 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
1505 #ifdef ASSERT
1506 case Op_Allocate:
1507 case Op_AllocateArray:
1508 case Op_Lock:
1509 case Op_Unlock:
1510 assert(false, "should be done already");
1511 break;
1512 #endif
1513 case Op_ArrayCopy:
1514 case Op_CallLeafNoFP:
1515 // Most array copies are ArrayCopy nodes at this point but there
1516 // are still a few direct calls to the copy subroutines (See
1517 // PhaseStringOpts::copy_string())
1518 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1519 call->as_CallLeaf()->is_call_to_arraycopystub();
1520 // fall through
1521 case Op_CallLeafVector:
1522 case Op_CallLeaf: {
1523      // Stub calls: objects do not escape but they are not scalar replaceable.
1524 // Adjust escape state for outgoing arguments.
1525 const TypeTuple * d = call->tf()->domain();
1526 bool src_has_oops = false;
1527 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1528 const Type* at = d->field_at(i);
1529 Node *arg = call->in(i);
1530 if (arg == nullptr) {
1531 continue;
1532 }
1533 const Type *aat = _igvn->type(arg);
1534 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1535 continue;
1536 }
1537 if (arg->is_AddP()) {
1538 //
1539 // The inline_native_clone() case when the arraycopy stub is called
1540 // after the allocation before Initialize and CheckCastPP nodes.
1541 // Or normal arraycopy for object arrays case.
1542 //
1543 // Set AddP's base (Allocate) as not scalar replaceable since
1544 // pointer to the base (with offset) is passed as argument.
1545 //
1546 arg = get_addp_base(arg);
1547 }
1548 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1549 assert(arg_ptn != nullptr, "should be registered");
1550 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1551 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1552 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1553                 aat->isa_ptr() != nullptr, "expecting a Ptr");
1554 bool arg_has_oops = aat->isa_oopptr() &&
1555 (aat->isa_instptr() ||
1556 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
1557 if (i == TypeFunc::Parms) {
1558 src_has_oops = arg_has_oops;
1559 }
1560 //
1561 // src or dst could be j.l.Object when other is basic type array:
1562 //
1563 // arraycopy(char[],0,Object*,0,size);
1564 // arraycopy(Object*,0,char[],0,size);
1565 //
1566 // Don't add edges in such cases.
1567 //
1568 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1569 arg_has_oops && (i > TypeFunc::Parms);
1570 #ifdef ASSERT
1571 if (!(is_arraycopy ||
1572 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1573 (call->as_CallLeaf()->_name != nullptr &&
1574 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1575 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1576 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1585 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1586 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1587 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1588 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1589 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1590 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1591 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1592 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1593 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1594 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1595 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1596 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1597 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1598 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1599 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1600 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1601 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1602 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1603 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1604 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1605 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1606 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1607 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1608 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
1609 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
1610 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1611 ))) {
1612 call->dump();
1613 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1614 }
1615 #endif
1616 // Always process arraycopy's destination object since
1617 // we need to add all possible edges to references in
1618 // source object.
1619 if (arg_esc >= PointsToNode::ArgEscape &&
1620 !arg_is_arraycopy_dest) {
1621 continue;
1622 }
1623 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1624 if (call->is_ArrayCopy()) {
1649 }
1650 }
1651 }
1652 break;
1653 }
1654 case Op_CallStaticJava: {
1655 // For a static call, we know exactly what method is being called.
1656      // Use the bytecode estimator to record the call's escape effects
1657 #ifdef ASSERT
1658 const char* name = call->as_CallStaticJava()->_name;
1659 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1660 #endif
1661 ciMethod* meth = call->as_CallJava()->method();
1662 if ((meth != nullptr) && meth->is_boxing_method()) {
1663 break; // Boxing methods do not modify any oops.
1664 }
1665      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1666 // fall-through if not a Java method or no analyzer information
1667 if (call_analyzer != nullptr) {
1668 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1669 const TypeTuple* d = call->tf()->domain();
1670 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1671 const Type* at = d->field_at(i);
1672 int k = i - TypeFunc::Parms;
1673 Node* arg = call->in(i);
1674 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1675 if (at->isa_ptr() != nullptr &&
1676 call_analyzer->is_arg_returned(k)) {
1677 // The call returns arguments.
1678 if (call_ptn != nullptr) { // Is call's result used?
1679 assert(call_ptn->is_LocalVar(), "node should be registered");
1680 assert(arg_ptn != nullptr, "node should be registered");
1681 add_edge(call_ptn, arg_ptn);
1682 }
1683 }
1684 if (at->isa_oopptr() != nullptr &&
1685 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1686 if (!call_analyzer->is_arg_stack(k)) {
1687              // The argument globally escapes
1688 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1689 } else {
1693 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1694 }
1695 }
1696 }
1697 }
1698 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1699 // The call returns arguments.
1700 assert(call_ptn->edge_count() > 0, "sanity");
1701 if (!call_analyzer->is_return_local()) {
1702            // The call may also return an unknown object.
1703 add_edge(call_ptn, phantom_obj);
1704 }
1705 }
1706 break;
1707 }
1708 }
1709 default: {
1710      // We fall through to here if this is not a Java method or there is no
1711      // analyzer information, or for some other type of call; assume the worst
1712      // case: all arguments globally escape.
1713 const TypeTuple* d = call->tf()->domain();
1714 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1715 const Type* at = d->field_at(i);
1716 if (at->isa_oopptr() != nullptr) {
1717 Node* arg = call->in(i);
1718 if (arg->is_AddP()) {
1719 arg = get_addp_base(arg);
1720 }
1721 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1722 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1723 }
1724 }
1725 }
1726 }
1727 }
1728
1729
1730 // Finish Graph construction.
1731 bool ConnectionGraph::complete_connection_graph(
1732 GrowableArray<PointsToNode*>& ptnodes_worklist,
1733 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2106 PointsToNode* base = i.get();
2107 if (base->is_JavaObject()) {
2108 // Skip Allocate's fields which will be processed later.
2109 if (base->ideal_node()->is_Allocate()) {
2110 return 0;
2111 }
2112 assert(base == null_obj, "only null ptr base expected here");
2113 }
2114 }
2115 if (add_edge(field, phantom_obj)) {
2116 // New edge was added
2117 new_edges++;
2118 add_field_uses_to_worklist(field);
2119 }
2120 return new_edges;
2121 }
2122
2123 // Find the values that initialize the fields of allocations.
2124 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2125 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2126 Node* alloc = pta->ideal_node();
2127
2128   // Do nothing for Allocate nodes since their field values are
2129   // "known" unless they are initialized by arraycopy/clone.
2130 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2131 return 0;
2132 }
2133 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2134 #ifdef ASSERT
2135 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2136 const char* name = alloc->as_CallStaticJava()->_name;
2137 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
2138 }
2139 #endif
2140   // Non-escaped allocations returned from Java or runtime calls have unknown field values.
2141 int new_edges = 0;
2142 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2143 PointsToNode* field = i.get();
2144 if (field->is_Field() && field->as_Field()->is_oop()) {
2145 if (add_edge(field, phantom_obj)) {
2146 // New edge was added
2147 new_edges++;
2148 add_field_uses_to_worklist(field->as_Field());
2149 }
2150 }
2151 }
2152 return new_edges;
2153 }
2154
2155 // Find the values that initialize the fields of allocations.
2156 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2157 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2158 Node* alloc = pta->ideal_node();
2159   // Do nothing for Call nodes since their field values are unknown.
2160 if (!alloc->is_Allocate()) {
2161 return 0;
2162 }
2163 InitializeNode* ini = alloc->as_Allocate()->initialization();
2164 bool visited_bottom_offset = false;
2165 GrowableArray<int> offsets_worklist;
2166 int new_edges = 0;
2167
2168   // Check if an oop field's initializing value is recorded and add
2169   // a corresponding null value if it is not recorded.
2170   // The Connection Graph does not record a default initialization by null
2171   // captured by the Initialize node.
2172 //
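  // For example (illustrative Java, hypothetical names):
  //
  //   class A { Object f; }          // f is never stored to below
  //   A a = new A();                 // Initialize captures no store to a.f
  //   if (a.f == x) { /* ... */ }    // the null edge added here lets the
  //                                  // pointer-compare optimization see the
  //                                  // default value of f
  //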
2173 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2174 PointsToNode* field = i.get(); // Field (AddP)
2175 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2176 continue; // Not oop field
2177 }
2178 int offset = field->as_Field()->offset();
2179 if (offset == Type::OffsetBot) {
2180 if (!visited_bottom_offset) {
2226 } else {
2227 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2228 tty->print_cr("----------init store has invalid value -----");
2229 store->dump();
2230 val->dump();
2231 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2232 }
2233 for (EdgeIterator j(val); j.has_next(); j.next()) {
2234 PointsToNode* obj = j.get();
2235 if (obj->is_JavaObject()) {
2236 if (!field->points_to(obj->as_JavaObject())) {
2237 missed_obj = obj;
2238 break;
2239 }
2240 }
2241 }
2242 }
2243 if (missed_obj != nullptr) {
2244 tty->print_cr("----------field---------------------------------");
2245 field->dump();
2246         tty->print_cr("----------missed reference to object-----------");
2247         missed_obj->dump();
2248         tty->print_cr("----------object referenced by init store -----");
2249 store->dump();
2250 val->dump();
2251 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2252 }
2253 }
2254 #endif
2255 } else {
2256 // There could be initializing stores which follow allocation.
2257 // For example, a volatile field store is not collected
2258 // by Initialize node.
2259 //
2260 // Need to check for dependent loads to separate such stores from
2261      // stores which follow loads. For now, add the initial value null so
2262      // that the pointer-compare optimization works correctly.
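      //
      // For example (illustrative Java, hypothetical names):
      //
      //   class A { volatile Object f; }
      //   A a = new A();   // the Initialize node captures no store to f
      //   a.f = x;         // the volatile store follows the allocation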
2263 }
2264 }
2265 if (value == nullptr) {
2266 // A field's initializing value was not recorded. Add null.
2267 if (add_edge(field, null_obj)) {
2268 // New edge was added
2530 assert(field->edge_count() > 0, "sanity");
2531 }
2532 }
2533 }
2534 }
2535 #endif
2536
2537 // Optimize ideal graph.
2538 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2539 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2540 Compile* C = _compile;
2541 PhaseIterGVN* igvn = _igvn;
2542 if (EliminateLocks) {
2543 // Mark locks before changing ideal graph.
2544 int cnt = C->macro_count();
2545 for (int i = 0; i < cnt; i++) {
2546 Node *n = C->macro_node(i);
2547 if (n->is_AbstractLock()) { // Lock and Unlock nodes
2548 AbstractLockNode* alock = n->as_AbstractLock();
2549 if (!alock->is_non_esc_obj()) {
2550 if (not_global_escape(alock->obj_node())) {
2551 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2552          // The lock could be marked eliminated by the lock coarsening
2553          // code during the first IGVN pass before EA. Replace the coarsened
2554          // flag so that all associated locks/unlocks are eliminated.
2555 #ifdef ASSERT
2556 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2557 #endif
2558 alock->set_non_esc_obj();
2559 }
2560 }
2561 }
2562 }
2563 }
2564
2565 if (OptimizePtrCompare) {
2566 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2567 Node *n = ptr_cmp_worklist.at(i);
2568 const TypeInt* tcmp = optimize_ptr_compare(n);
2569 if (tcmp->singleton()) {
2570 Node* cmp = igvn->makecon(tcmp);
2571 #ifndef PRODUCT
2572 if (PrintOptimizePtrCompare) {
2573 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2574 if (Verbose) {
2575 n->dump(1);
2576 }
2577 }
2578 #endif
2579 igvn->replace_node(n, cmp);
2580 }
2581 }
2582 }
2583
2584 // For MemBarStoreStore nodes added in library_call.cpp, check
2585 // escape status of associated AllocateNode and optimize out
2586 // MemBarStoreStore node if the allocated object never escapes.
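// For example (an illustrative sketch; the exact intrinsic emitting the
// barrier is assumed here):
//
//   byte[] b = src.clone();   // clone-style intrinsics publish the copy with
//                             // a MemBarStoreStore ordering its init stores
//   use(b);                   // if b never escapes, no other thread can
//                             // observe it, so the barrier can be relaxed
//                             // to a plain MemBarCPUOrder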
2587 for (int i = 0; i < storestore_worklist.length(); i++) {
2588 Node* storestore = storestore_worklist.at(i);
2589 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2590 if (alloc->is_Allocate() && not_global_escape(alloc)) {
2591 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2592 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
2593 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2594 igvn->register_new_node_with_optimizer(mb);
2595 igvn->replace_node(storestore, mb);
2596 }
2597 }
2598 }
2599
2600 // Optimize object pointer comparisons.
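// For example (illustrative Java):
//
//   Object a = new Object();
//   Object b = new Object();
//   if (a == b) { /* ... */ }   // a and b are distinct non-escaping
//                               // allocations, so the CmpP folds to
//                               // "not equal" (NE)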
2601 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2602 assert(OptimizePtrCompare, "sanity");
2603 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2604 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2605 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2606 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]
2607
2608 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2609 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2610 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2611 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2612 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2613 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2614
2615 // Check simple cases first.
2729 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2730 assert((src != null_obj) && (dst != null_obj), "not for ConP null");
2731 PointsToNode* ptadr = _nodes.at(n->_idx);
2732 if (ptadr != nullptr) {
2733 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2734 return;
2735 }
2736 Compile* C = _compile;
2737 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2738 map_ideal_node(n, ptadr);
2739 // Add edge from arraycopy node to source object.
2740 (void)add_edge(ptadr, src);
2741 src->set_arraycopy_src();
2742 // Add edge from destination object to arraycopy node.
2743 (void)add_edge(dst, ptadr);
2744 dst->set_arraycopy_dst();
2745 }
2746
2747 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2748 const Type* adr_type = n->as_AddP()->bottom_type();
2749 BasicType bt = T_INT;
2750 if (offset == Type::OffsetBot) {
2751 // Check only oop fields.
2752 if (!adr_type->isa_aryptr() ||
2753 adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2754 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
2755 // OffsetBot is used to reference array's element. Ignore first AddP.
2756 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
2757 bt = T_OBJECT;
2758 }
2759 }
2760 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2761 if (adr_type->isa_instptr()) {
2762 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2763 if (field != nullptr) {
2764 bt = field->layout_type();
2765 } else {
2766 // Check for unsafe oop field access
2767 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2768 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2769 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2770 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2771 bt = T_OBJECT;
2772 (*unsafe) = true;
2773 }
2774 }
2775 } else if (adr_type->isa_aryptr()) {
2776 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2777 // Ignore array length load.
2778 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
2779 // Ignore first AddP.
2780 } else {
2781 const Type* elemtype = adr_type->isa_aryptr()->elem();
2782 bt = elemtype->array_element_basic_type();
2783 }
2784 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2785 // Allocation initialization, ThreadLocal field access, unsafe access
2786 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2787 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2788 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2789 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2790 bt = T_OBJECT;
2791 }
2792 }
2793 }
2794 // Note: T_NARROWOOP is not classed as a real reference type
2795 return (is_reference_type(bt) || bt == T_NARROWOOP);
2796 }
2797
2798 // Returns the unique pointed-to Java object, or null.
2799 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
2800 // If the node was created after the escape computation we can't answer.
2801 uint idx = n->_idx;
2802 if (idx >= nodes_size()) {
2945 return true;
2946 }
2947 }
2948 }
2949 }
2950 }
2951 return false;
2952 }
2953
2954 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
2955 const Type *adr_type = phase->type(adr);
2956 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
2957     // We are computing a raw address for a store captured by an Initialize
2958     // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
2959 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2960 assert(offs != Type::OffsetBot ||
2961 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2962 "offset must be a constant or it is initialization of array");
2963 return offs;
2964 }
2965 const TypePtr *t_ptr = adr_type->isa_ptr();
2966 assert(t_ptr != nullptr, "must be a pointer type");
2967 return t_ptr->offset();
2968 }
2969
2970 Node* ConnectionGraph::get_addp_base(Node *addp) {
2971 assert(addp->is_AddP(), "must be AddP");
2972 //
2973 // AddP cases for Base and Address inputs:
2974 // case #1. Direct object's field reference:
2975 // Allocate
2976 // |
2977 // Proj #5 ( oop result )
2978 // |
2979 // CheckCastPP (cast to instance type)
2980 // | |
2981 // AddP ( base == address )
2982 //
2983 // case #2. Indirect object's field reference:
2984 // Phi
2985 // |
2986 // CastPP (cast to instance type)
2987 // | |
3101 }
3102 return nullptr;
3103 }
3104
3105 //
3106 // Adjust the type and inputs of an AddP which computes the
3107 // address of a field of an instance
3108 //
3109 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3110 PhaseGVN* igvn = _igvn;
3111 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3112 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3113 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3114 if (t == nullptr) {
3115     // We are computing a raw address for a store captured by an Initialize
3116     // node; compute an appropriate address type (cases #3 and #5).
3117 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3118 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3119 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3120 assert(offs != Type::OffsetBot, "offset must be a constant");
3121 t = base_t->add_offset(offs)->is_oopptr();
3122 }
3123 int inst_id = base_t->instance_id();
3124 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3125 "old type must be non-instance or match new type");
3126
3127   // The type 't' could be a subclass of 'base_t'.
3128   // As a result t->offset() could be larger than base_t's size, which will
3129   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3130   // constructor verifies correctness of the offset.
3131   //
3132   // It could happen on a subclass branch (from type profiling
3133   // inlining) which was not eliminated during parsing since the exactness
3134   // of the allocation type was not propagated to the subclass type check.
3135   //
3136   // Or the type 't' could be unrelated to 'base_t' altogether.
3137   // It could happen when the CHA type differs from the MDO type on a dead path
3138   // (for example, from an instanceof check) which is not collapsed during parsing.
3139   //
3140   // Do nothing for such an AddP node and don't process its users since
3141   // this code branch will go away.
3142   //
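  // Illustrative Java sketch (hypothetical names) of the subclass case:
  //
  //   class Base { int x; }
  //   class Sub extends Base { Object y; }
  //   Base b = new Base();          // allocation with exact type Base
  //   if (b instanceof Sub) {       // branch kept alive during parsing
  //     Object o = ((Sub) b).y;     // AddP typed for Sub with an offset
  //   }                             // past the end of Base
  //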
3143 if (!t->is_known_instance() &&
3144 !base_t->maybe_java_subtype_of(t)) {
3145 return false; // bail out
3146 }
3147 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3148 // Do NOT remove the next line: ensure a new alias index is allocated
3149 // for the instance type. Note: C++ will not remove it since the call
3150   // has a side effect.
3151 int alias_idx = _compile->get_alias_index(tinst);
3152 igvn->set_type(addp, tinst);
3153 // record the allocation in the node map
3154 set_map(addp, get_map(base->_idx));
3155 // Set addp's Base and Address to 'base'.
3156 Node *abase = addp->in(AddPNode::Base);
3157 Node *adr = addp->in(AddPNode::Address);
3158 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3159 adr->in(0)->_idx == (uint)inst_id) {
3160 // Skip AddP cases #3 and #5.
3161 } else {
3162 assert(!abase->is_top(), "sanity"); // AddP case #3
3163 if (abase != base) {
3164 igvn->hash_delete(addp);
3165 addp->set_req(AddPNode::Base, base);
3166 if (abase == adr) {
3167 addp->set_req(AddPNode::Address, base);
3827 ptnode_adr(n->_idx)->dump();
3828 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3829 #endif
3830 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3831 return;
3832 } else {
3833 Node *val = get_map(jobj->idx()); // CheckCastPP node
3834 TypeNode *tn = n->as_Type();
3835 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3836 assert(tinst != nullptr && tinst->is_known_instance() &&
3837 tinst->instance_id() == jobj->idx() , "instance type expected.");
3838
3839 const Type *tn_type = igvn->type(tn);
3840 const TypeOopPtr *tn_t;
3841 if (tn_type->isa_narrowoop()) {
3842 tn_t = tn_type->make_ptr()->isa_oopptr();
3843 } else {
3844 tn_t = tn_type->isa_oopptr();
3845 }
3846 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
3847 if (tn_type->isa_narrowoop()) {
3848 tn_type = tinst->make_narrowoop();
3849 } else {
3850 tn_type = tinst;
3851 }
3852 igvn->hash_delete(tn);
3853 igvn->set_type(tn, tn_type);
3854 tn->set_type(tn_type);
3855 igvn->hash_insert(tn);
3856 record_for_optimizer(n);
3857 } else {
3858 assert(tn_type == TypePtr::NULL_PTR ||
3859 tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t),
3860 "unexpected type");
3861 continue; // Skip dead path with different type
3862 }
3863 }
3864 } else {
3865 debug_only(n->dump();)
3866 assert(false, "EA: unexpected node");
3867 continue;
3868 }
3869 // push allocation's users on appropriate worklist
3870 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3871 Node *use = n->fast_out(i);
3872       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3873 // Load/store to instance's field
3874 memnode_worklist.append_if_missing(use);
3875 } else if (use->is_MemBar()) {
3876 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3877 memnode_worklist.append_if_missing(use);
3878 }
3879 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3880 Node* addp2 = find_second_addp(use, n);
3881 if (addp2 != nullptr) {
3882 alloc_worklist.append_if_missing(addp2);
3883 }
3884 alloc_worklist.append_if_missing(use);
3885 } else if (use->is_Phi() ||
3886 use->is_CheckCastPP() ||
3887 use->is_EncodeNarrowPtr() ||
3888 use->is_DecodeNarrowPtr() ||
3889 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3890 alloc_worklist.append_if_missing(use);
3891 #ifdef ASSERT
3892 } else if (use->is_Mem()) {
3893 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3894 } else if (use->is_MergeMem()) {
3895 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3896 } else if (use->is_SafePoint()) {
3897 // Look for MergeMem nodes for calls which reference unique allocation
3898 // (through CheckCastPP nodes) even for debug info.
3899 Node* m = use->in(TypeFunc::Memory);
3900 if (m->is_MergeMem()) {
3901 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3902 }
3903 } else if (use->Opcode() == Op_EncodeISOArray) {
3904 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3905 // EncodeISOArray overwrites destination array
3906 memnode_worklist.append_if_missing(use);
3907 }
3908 } else {
3909 uint op = use->Opcode();
3910 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3911 (use->in(MemNode::Memory) == n)) {
3912           // They overwrite the memory edge corresponding to the destination array.
3913 memnode_worklist.append_if_missing(use);
3914 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3915 op == Op_CastP2X || op == Op_StoreCM ||
3916 op == Op_FastLock || op == Op_AryEq ||
3917 op == Op_StrComp || op == Op_CountPositives ||
3918 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3919 op == Op_StrEquals || op == Op_VectorizedHashCode ||
3920 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3921 op == Op_SubTypeCheck ||
3922 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3923 n->dump();
3924 use->dump();
3925 assert(false, "EA: missing allocation reference path");
3926 }
3927 #endif
3928 }
3929 }
3930
3931 }
3932
3933 #ifdef ASSERT
3934 if (VerifyReduceAllocationMerges) {
3935 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints.
3936 for (uint i = 0; i < reducible_merges.size(); i++) {
3937 Node* phi = reducible_merges.at(i);
3938
3939 if (!reduced_merges.member(phi)) {
3940 phi->dump(2);
3941 phi->dump(-2);
3994 if (memnode_worklist.length() == 0)
3995 return; // nothing to do
3996 while (memnode_worklist.length() != 0) {
3997 Node *n = memnode_worklist.pop();
3998 if (visited.test_set(n->_idx)) {
3999 continue;
4000 }
4001 if (n->is_Phi() || n->is_ClearArray()) {
4002 // we don't need to do anything, but the users must be pushed
4003 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4004 // we don't need to do anything, but the users must be pushed
4005 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4006 if (n == nullptr) {
4007 continue;
4008 }
4009 } else if (n->Opcode() == Op_StrCompressedCopy ||
4010 n->Opcode() == Op_EncodeISOArray) {
4011 // get the memory projection
4012 n = n->find_out_with(Op_SCMemProj);
4013 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4014 } else {
4015 assert(n->is_Mem(), "memory node required.");
4016 Node *addr = n->in(MemNode::Address);
4017 const Type *addr_t = igvn->type(addr);
4018 if (addr_t == Type::TOP) {
4019 continue;
4020 }
4021 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4022 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4023 assert ((uint)alias_idx < new_index_end, "wrong alias index");
4024 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4025 if (_compile->failing()) {
4026 return;
4027 }
4028 if (mem != n->in(MemNode::Memory)) {
4029         // We delay the memory edge update since we need the old one in
4030         // the MergeMem code below when instance memory slices are separated.
4031 set_map(n, mem);
4032 }
4033 if (n->is_Load()) {
4036 // get the memory projection
4037 n = n->find_out_with(Op_SCMemProj);
4038 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4039 }
4040 }
4041 // push user on appropriate worklist
4042 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4043 Node *use = n->fast_out(i);
4044 if (use->is_Phi() || use->is_ClearArray()) {
4045 memnode_worklist.append_if_missing(use);
4046 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4047 if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4048 continue;
4049 }
4050 memnode_worklist.append_if_missing(use);
4051 } else if (use->is_MemBar()) {
4052 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4053 memnode_worklist.append_if_missing(use);
4054 }
4055 #ifdef ASSERT
4056       } else if (use->is_Mem()) {
4057 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4058 } else if (use->is_MergeMem()) {
4059 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4060 } else if (use->Opcode() == Op_EncodeISOArray) {
4061 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4062 // EncodeISOArray overwrites destination array
4063 memnode_worklist.append_if_missing(use);
4064 }
4065 } else {
4066 uint op = use->Opcode();
4067 if ((use->in(MemNode::Memory) == n) &&
4068 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4069           // They overwrite the memory edge corresponding to the destination array.
4070 memnode_worklist.append_if_missing(use);
4071 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4072 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4073 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4074 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4075 n->dump();
4076 use->dump();
4077 assert(false, "EA: missing memory path");
4078 }
4079 #endif
4080 }
4081 }
4082 }
4083
4084 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4085 // Walk each memory slice moving the first node encountered of each
4086 // instance type to the input corresponding to its alias index.
4087 uint length = mergemem_worklist.length();
4088   for (uint next = 0; next < length; ++next) {
4089 MergeMemNode* nmm = mergemem_worklist.at(next);
4090 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4091 // Note: we don't want to use MergeMemStream here because we only want to
4092 // scan inputs which exist at the start, not ones we add during processing.
4093     // Note 2: MergeMem may already contain instance memory slices added
4094     // during the find_inst_mem() call when memory nodes were processed above.
4141 Node* result = step_through_mergemem(nmm, ni, tinst);
4142 if (result == nmm->base_memory()) {
4143 // Didn't find instance memory, search through general slice recursively.
4144 result = nmm->memory_at(_compile->get_general_index(ni));
4145 result = find_inst_mem(result, ni, orig_phis);
4146 if (_compile->failing()) {
4147 return;
4148 }
4149 nmm->set_memory_at(ni, result);
4150 }
4151 }
4152 igvn->hash_insert(nmm);
4153 record_for_optimizer(nmm);
4154 }
4155
4156 // Phase 4: Update the inputs of non-instance memory Phis and
4157 // the Memory input of memnodes
4158 // First update the inputs of any non-instance Phi's from
4159 // which we split out an instance Phi. Note we don't have
4160 // to recursively process Phi's encountered on the input memory
4161 // chains as is done in split_memory_phi() since they will
4162 // also be processed here.
4163 for (int j = 0; j < orig_phis.length(); j++) {
4164 PhiNode *phi = orig_phis.at(j);
4165 int alias_idx = _compile->get_alias_index(phi->adr_type());
4166 igvn->hash_delete(phi);
4167 for (uint i = 1; i < phi->req(); i++) {
4168 Node *mem = phi->in(i);
4169 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4170 if (_compile->failing()) {
4171 return;
4172 }
4173 if (mem != new_mem) {
4174 phi->set_req(i, new_mem);
4175 }
4176 }
4177 igvn->hash_insert(phi);
4178 record_for_optimizer(phi);
4179 }
4180
4181 // Update the memory inputs of MemNodes with the value we computed
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/c2/barrierSetC2.hpp"
30 #include "libadt/vectset.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/metaspace.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "opto/c2compiler.hpp"
35 #include "opto/arraycopynode.hpp"
36 #include "opto/callnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/compile.hpp"
39 #include "opto/escape.hpp"
40 #include "opto/macro.hpp"
41 #include "opto/phaseX.hpp"
42 #include "opto/movenode.hpp"
43 #include "opto/rootnode.hpp"
44 #include "utilities/macros.hpp"
45
46 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
47 // If ReduceAllocationMerges is enabled we might call split_through_phi during
48 // split_unique_types and that will create additional nodes that need to be
49 // pushed to the ConnectionGraph. The code below bumps the initial capacity of
50 // _nodes by 10% to account for these additional nodes. If capacity is exceeded
51 // the array will be reallocated.
52 _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
145 GrowableArray<SafePointNode*> sfn_worklist;
146 GrowableArray<MergeMemNode*> mergemem_worklist;
147 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
148
149 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
150
151 // 1. Populate Connection Graph (CG) with PointsTo nodes.
152 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
153 // Initialize worklist
154 if (C->root() != nullptr) {
155 ideal_nodes.push(C->root());
156 }
157 // Processed ideal nodes are unique on ideal_nodes list
158 // but several ideal nodes are mapped to the phantom_obj.
159 // To avoid duplicated entries on the following worklists
160 // add the phantom_obj only once to them.
161 ptnodes_worklist.append(phantom_obj);
162 java_objects_worklist.append(phantom_obj);
163 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
164 Node* n = ideal_nodes.at(next);
165 if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
166 !n->in(MemNode::Address)->is_AddP() &&
167 _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
168 // Load/Store at mark work address is at offset 0 so has no AddP which confuses EA
169 Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
170 _igvn->register_new_node_with_optimizer(addp);
171 _igvn->replace_input_of(n, MemNode::Address, addp);
172 ideal_nodes.push(addp);
173 _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
174 }
175 // Create PointsTo nodes and add them to Connection Graph. Called
176 // only once per ideal node since ideal_nodes is Unique_Node list.
177 add_node_to_connection_graph(n, &delayed_worklist);
178 PointsToNode* ptn = ptnode_adr(n->_idx);
179 if (ptn != nullptr && ptn != phantom_obj) {
180 ptnodes_worklist.append(ptn);
181 if (ptn->is_JavaObject()) {
182 java_objects_worklist.append(ptn->as_JavaObject());
183 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
184 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
185 // Only allocations and java static calls results are interesting.
186 non_escaped_allocs_worklist.append(ptn->as_JavaObject());
187 }
188 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
189 oop_fields_worklist.append(ptn->as_Field());
190 }
191 }
192 // Collect some interesting nodes for further use.
193 switch (n->Opcode()) {
194 case Op_MergeMem:
687
688 // The next two inputs are:
689 // (1) A copy of the original pointer to NSR objects.
690 // (2) A selector, used to decide if we need to rematerialize an object
691 // or use the pointer to a NSR object.
692 // See more details of these fields in the declaration of SafePointScalarMergeNode
693 sfpt->add_req(ophi);
694 sfpt->add_req(selector);
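// Sketch of the assumed result: the safepoint's debug info now ends with
// [..., ophi, selector], and the loop below appends one
// SafePointScalarObject input to 'smerge' per scalar-replaceable input
// of the merge.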
695
696 for (uint i = 1; i < ophi->req(); i++) {
697 Node* base = ophi->in(i);
698 JavaObjectNode* ptn = unique_java_object(base);
699
700 // If the base is not scalar replaceable we don't need to register information about
701 // it at this time.
702 if (ptn == nullptr || !ptn->scalar_replaceable()) {
703 continue;
704 }
705
706 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
707 Unique_Node_List value_worklist;
708 SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
709 guarantee(value_worklist.size() == 0, "Unimplemented: Valhalla support for 8287061");
710 if (sobj == nullptr) {
711 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
712 return;
713 }
714
715 // Now make a pass over the debug information replacing any references
716 // to the allocated object with "sobj"
717 Node* ccpp = alloc->result_cast();
718 sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
719
720 // Register the scalarized object as a candidate for reallocation
721 smerge->add_req(sobj);
722 }
723
724 // Replaces debug information references to "ophi" in "sfpt" with references to "smerge"
725 sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);
726
727 // The call to 'replace_edges_in_range' above might have removed the
728 // reference to ophi that we need at _merge_pointer_idx. The line below makes
729 // sure the reference is maintained.
853 return false;
854 }
855
856 // Returns true if at least one of the arguments to the call is an object
857 // that does not escape globally.
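// Illustrative example (hypothetical Java source): for a call such as
//   len(new StringBuilder())  where  static int len(StringBuilder sb) { ... }
// the freshly allocated argument typically stays below GlobalEscape, so
// this function would return true for that call.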
858 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
859 if (call->method() != nullptr) {
860 uint max_idx = TypeFunc::Parms + call->method()->arg_size();
861 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
862 Node* p = call->in(idx);
863 if (not_global_escape(p)) {
864 return true;
865 }
866 }
867 } else {
868 const char* name = call->as_CallStaticJava()->_name;
869 assert(name != nullptr, "no name");
870 // no arg escapes through uncommon traps
871 if (strcmp(name, "uncommon_trap") != 0) {
872 // process_call_arguments() assumes that all arguments escape globally
873 const TypeTuple* d = call->tf()->domain_sig();
874 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
875 const Type* at = d->field_at(i);
876 if (at->isa_oopptr() != nullptr) {
877 return true;
878 }
879 }
880 }
881 }
882 return false;
883 }
884
885
886
887 // Utility function for nodes that load an object
888 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
889 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
890 // ThreadLocal has RawPtr type.
891 const Type* t = _igvn->type(n);
892 if (t->make_ptr() != nullptr) {
893 Node* adr = n->in(MemNode::Address);
927 // first IGVN optimization when escape information is still available.
928 record_for_optimizer(n);
929 } else if (n->is_Allocate()) {
930 add_call_node(n->as_Call());
931 record_for_optimizer(n);
932 } else {
933 if (n->is_CallStaticJava()) {
934 const char* name = n->as_CallStaticJava()->_name;
935 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
936 return; // Skip uncommon traps
937 }
938 }
939 // Don't mark as processed since call's arguments have to be processed.
940 delayed_worklist->push(n);
941 // Check if a call returns an object.
942 if ((n->as_Call()->returns_pointer() &&
943 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
944 (n->is_CallStaticJava() &&
945 n->as_CallStaticJava()->is_boxing_method())) {
946 add_call_node(n->as_Call());
947 } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
948 bool returns_oop = false;
949 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
950 ProjNode* pn = n->fast_out(i)->as_Proj();
951 if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
952 returns_oop = true;
953 }
954 }
955 if (returns_oop) {
956 add_call_node(n->as_Call());
957 }
958 }
959 }
960 return;
961 }
962 // Put this check here to process call arguments since some call nodes
963 // point to phantom_obj.
964 if (n_ptn == phantom_obj || n_ptn == null_obj) {
965 return; // Skip predefined nodes.
966 }
967 switch (opcode) {
968 case Op_AddP: {
969 Node* base = get_addp_base(n);
970 PointsToNode* ptn_base = ptnode_adr(base->_idx);
971 // Field nodes are created for all field types. They are used in
972 // adjust_scalar_replaceable_state() and split_unique_types().
973 // Note, non-oop fields will have only base edges in Connection
974 // Graph because such fields are not used for oop loads and stores.
975 int offset = address_offset(n, igvn);
976 add_field(n, PointsToNode::NoEscape, offset);
977 if (ptn_base == nullptr) {
978 delayed_worklist->push(n); // Process it later.
979 } else {
980 n_ptn = ptnode_adr(n_idx);
981 add_base(n_ptn->as_Field(), ptn_base);
982 }
983 break;
984 }
985 case Op_CastX2P: {
986 map_ideal_node(n, phantom_obj);
987 break;
988 }
989 case Op_InlineType:
990 case Op_CastPP:
991 case Op_CheckCastPP:
992 case Op_EncodeP:
993 case Op_DecodeN:
994 case Op_EncodePKlass:
995 case Op_DecodeNKlass: {
996 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
997 break;
998 }
999 case Op_CMoveP: {
1000 add_local_var(n, PointsToNode::NoEscape);
1001 // Do not add edges during the first iteration because some of them
1002 // may not be defined yet.
1003 delayed_worklist->push(n);
1004 break;
1005 }
1006 case Op_ConP:
1007 case Op_ConN:
1008 case Op_ConNKlass: {
1009 // assume all oop constants globally escape except for null
1041 case Op_PartialSubtypeCheck: {
1042 // Produces Null or notNull and is used only in CmpP, so
1043 // phantom_obj could be used.
1044 map_ideal_node(n, phantom_obj); // Result is unknown
1045 break;
1046 }
1047 case Op_Phi: {
1048 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1049 // ThreadLocal has RawPtr type.
1050 const Type* t = n->as_Phi()->type();
1051 if (t->make_ptr() != nullptr) {
1052 add_local_var(n, PointsToNode::NoEscape);
1053 // Do not add edges during the first iteration because some of them
1054 // may not be defined yet.
1055 delayed_worklist->push(n);
1056 }
1057 break;
1058 }
1059 case Op_Proj: {
1060 // we are only interested in the oop result projection from a call
1061 if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1062 (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1063 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1064 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1065 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1066 }
1067 break;
1068 }
1069 case Op_Rethrow: // Exception object escapes
1070 case Op_Return: {
1071 if (n->req() > TypeFunc::Parms &&
1072 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1073 // Treat Return value as LocalVar with GlobalEscape escape state.
1074 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1075 }
1076 break;
1077 }
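// Illustrative example (hypothetical Java source): in
//   static Object id(Object o) { return o; }
// the value flowing into the Return is treated as GlobalEscape because
// the caller may do anything with it.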
1078 case Op_CompareAndExchangeP:
1079 case Op_CompareAndExchangeN:
1080 case Op_GetAndSetP:
1081 case Op_GetAndSetN: {
1082 add_objload_to_connection_graph(n, delayed_worklist);
1083 // fall-through
1084 }
1146 if (n->is_Call()) {
1147 process_call_arguments(n->as_Call());
1148 return;
1149 }
1150 assert(n->is_Store() || n->is_LoadStore() ||
1151        ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1152        "node should be registered already");
1153 int opcode = n->Opcode();
1154 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1155 if (gc_handled) {
1156 return; // Ignore node if already handled by GC.
1157 }
1158 switch (opcode) {
1159 case Op_AddP: {
1160 Node* base = get_addp_base(n);
1161 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1162 assert(ptn_base != nullptr, "field's base should be registered");
1163 add_base(n_ptn->as_Field(), ptn_base);
1164 break;
1165 }
1166 case Op_InlineType:
1167 case Op_CastPP:
1168 case Op_CheckCastPP:
1169 case Op_EncodeP:
1170 case Op_DecodeN:
1171 case Op_EncodePKlass:
1172 case Op_DecodeNKlass: {
1173 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1174 break;
1175 }
1176 case Op_CMoveP: {
1177 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1178 Node* in = n->in(i);
1179 if (in == nullptr) {
1180 continue; // ignore null
1181 }
1182 Node* uncast_in = in->uncast();
1183 if (uncast_in->is_top() || uncast_in == n) {
1184 continue; // ignore top or inputs which go back to this node
1185 }
1186 PointsToNode* ptn = ptnode_adr(in->_idx);
1201 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1202 // ThreadLocal has RawPtr type.
1203 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1204 for (uint i = 1; i < n->req(); i++) {
1205 Node* in = n->in(i);
1206 if (in == nullptr) {
1207 continue; // ignore null
1208 }
1209 Node* uncast_in = in->uncast();
1210 if (uncast_in->is_top() || uncast_in == n) {
1211 continue; // ignore top or inputs which go back to this node
1212 }
1213 PointsToNode* ptn = ptnode_adr(in->_idx);
1214 assert(ptn != nullptr, "node should be registered");
1215 add_edge(n_ptn, ptn);
1216 }
1217 break;
1218 }
1219 case Op_Proj: {
1220 // we are only interested in the oop result projection from a call
1221 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1222 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1223 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1224 break;
1225 }
1226 case Op_Rethrow: // Exception object escapes
1227 case Op_Return: {
1228 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1229 "Unexpected node type");
1230 // Treat Return value as LocalVar with GlobalEscape escape state.
1231 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1232 break;
1233 }
1234 case Op_CompareAndExchangeP:
1235 case Op_CompareAndExchangeN:
1236 case Op_GetAndSetP:
1237 case Op_GetAndSetN: {
1238 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1239 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1240 // fall-through
1241 }
1242 case Op_CompareAndSwapP:
1378 PointsToNode* ptn = ptnode_adr(val->_idx);
1379 assert(ptn != nullptr, "node should be registered");
1380 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1381 // Add edge to object for unsafe access with offset.
1382 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1383 assert(adr_ptn != nullptr, "node should be registered");
1384 if (adr_ptn->is_Field()) {
1385 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1386 add_edge(adr_ptn, ptn);
1387 }
1388 return true;
1389 }
1390 #ifdef ASSERT
1391 n->dump(1);
1392 assert(false, "not unsafe");
1393 #endif
1394 return false;
1395 }
1396
1397 void ConnectionGraph::add_call_node(CallNode* call) {
1398 assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for a call which returns a pointer");
1399 uint call_idx = call->_idx;
1400 if (call->is_Allocate()) {
1401 Node* k = call->in(AllocateNode::KlassNode);
1402 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1403 assert(kt != nullptr, "TypeKlassPtr required.");
1404 PointsToNode::EscapeState es = PointsToNode::NoEscape;
1405 bool scalar_replaceable = true;
1406 NOT_PRODUCT(const char* nsr_reason = "");
1407 if (call->is_AllocateArray()) {
1408 if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1409 es = PointsToNode::GlobalEscape;
1410 } else {
1411 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1412 if (length < 0) {
1413 // Not scalar replaceable if the length is not constant.
1414 scalar_replaceable = false;
1415 NOT_PRODUCT(nsr_reason = "has a non-constant length");
1416 } else if (length > EliminateAllocationArraySizeLimit) {
1417 // Not scalar replaceable if the length is too big.
1418 scalar_replaceable = false;
1454 //
1455 // - all oop arguments are escaping globally;
1456 //
1457 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1458 //
1459 // - the same as CallDynamicJavaNode if bytecode analysis can't be done;
1460 //
1461 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
1462 // - mapped to NoEscape JavaObject node if non-escaping object allocated
1463 // during call is returned;
1464 // - mapped to ArgEscape LocalVar node pointing to object arguments
1465 //   which are returned and do not escape during the call (see the example below);
1466 //
1467 // - oop arguments escaping status is defined by bytecode analysis;
1468 //
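// Illustrative example (hypothetical Java source): for
//   static Object self(Object o) { return o; }
// the bytecode analyzer reports that the argument is returned, so the
// call is mapped to an ArgEscape LocalVar (the 'ret_arg' case below).
//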
1469 // For a static call, we know exactly what method is being called.
1470 // Use bytecode estimator to record whether the call's return value escapes.
1471 ciMethod* meth = call->as_CallJava()->method();
1472 if (meth == nullptr) {
1473 const char* name = call->as_CallStaticJava()->_name;
1474 assert(strncmp(name, "_multianewarray", 15) == 0 ||
1475 strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
1476 // Returns a newly allocated non-escaped object.
1477 add_java_object(call, PointsToNode::NoEscape);
1478 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
1479 } else if (meth->is_boxing_method()) {
1480 // Returns boxing object
1481 PointsToNode::EscapeState es;
1482 vmIntrinsics::ID intr = meth->intrinsic_id();
1483 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1484 // It does not escape if object is always allocated.
1485 es = PointsToNode::NoEscape;
1486 } else {
1487 // It escapes globally if object could be loaded from cache.
1488 es = PointsToNode::GlobalEscape;
1489 }
1490 add_java_object(call, es);
1491 if (es == PointsToNode::GlobalEscape) {
1492 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
1493 }
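// For example, Integer.valueOf(42) may return an object from the shared
// Integer cache and therefore escapes globally, while Float.valueOf() and
// Double.valueOf() always allocate a fresh object and can stay NoEscape.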
1494 } else {
1495 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1496 call_analyzer->copy_dependencies(_compile->dependencies());
1497 if (call_analyzer->is_return_allocated()) {
1498 // Returns a newly allocated non-escaped object, simply
1499 // update dependency information.
1500 // Mark it as NoEscape so that objects referenced by
1501 // its fields will be marked as NoEscape at least.
1502 add_java_object(call, PointsToNode::NoEscape);
1503 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1504 } else {
1505 // Determine whether any arguments are returned.
1506 const TypeTuple* d = call->tf()->domain_cc();
1507 bool ret_arg = false;
1508 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1509 if (d->field_at(i)->isa_ptr() != nullptr &&
1510 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1511 ret_arg = true;
1512 break;
1513 }
1514 }
1515 if (ret_arg) {
1516 add_local_var(call, PointsToNode::ArgEscape);
1517 } else {
1518 // Returns unknown object.
1519 map_ideal_node(call, phantom_obj);
1520 }
1521 }
1522 }
1523 } else {
1524 // Another type of call, assume the worst case:
1525 // returned value is unknown and globally escapes.
1526 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
1534 #ifdef ASSERT
1535 case Op_Allocate:
1536 case Op_AllocateArray:
1537 case Op_Lock:
1538 case Op_Unlock:
1539 assert(false, "should be done already");
1540 break;
1541 #endif
1542 case Op_ArrayCopy:
1543 case Op_CallLeafNoFP:
1544 // Most array copies are ArrayCopy nodes at this point but there
1545 // are still a few direct calls to the copy subroutines (see
1546 // PhaseStringOpts::copy_string())
1547 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1548 call->as_CallLeaf()->is_call_to_arraycopystub();
1549 // fall through
1550 case Op_CallLeafVector:
1551 case Op_CallLeaf: {
1552 // Stub calls: objects do not escape but they are not scalar replaceable.
1553 // Adjust escape state for outgoing arguments.
1554 const TypeTuple * d = call->tf()->domain_sig();
1555 bool src_has_oops = false;
1556 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1557 const Type* at = d->field_at(i);
1558 Node *arg = call->in(i);
1559 if (arg == nullptr) {
1560 continue;
1561 }
1562 const Type *aat = _igvn->type(arg);
1563 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1564 continue;
1565 }
1566 if (arg->is_AddP()) {
1567 //
1568 // The inline_native_clone() case when the arraycopy stub is called
1569 // after the allocation before Initialize and CheckCastPP nodes.
1570 // Or normal arraycopy for object arrays case.
1571 //
1572 // Set AddP's base (Allocate) as not scalar replaceable since
1573 // pointer to the base (with offset) is passed as argument.
1574 //
1575 arg = get_addp_base(arg);
1576 }
1577 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1578 assert(arg_ptn != nullptr, "should be registered");
1579 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1580 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1581 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1582 aat->isa_ptr() != nullptr, "expecting a Ptr");
1583 bool arg_has_oops = aat->isa_oopptr() &&
1584 (aat->isa_instptr() ||
1585 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
1586 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
1587 aat->isa_aryptr()->is_flat() &&
1588 aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
1589 if (i == TypeFunc::Parms) {
1590 src_has_oops = arg_has_oops;
1591 }
1592 //
1593 // src or dst could be j.l.Object when other is basic type array:
1594 //
1595 // arraycopy(char[],0,Object*,0,size);
1596 // arraycopy(Object*,0,char[],0,size);
1597 //
1598 // Don't add edges in such cases.
1599 //
1600 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1601 arg_has_oops && (i > TypeFunc::Parms);
1602 #ifdef ASSERT
1603 if (!(is_arraycopy ||
1604 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1605 (call->as_CallLeaf()->_name != nullptr &&
1606 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1607 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1608 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1617 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1618 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1619 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1620 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1621 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1622 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1623 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1624 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1625 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1626 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1627 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1628 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1629 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1630 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1631 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1632 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1633 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1634 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1635 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1636 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1637 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1638 strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
1639 strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
1640 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1641 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1642 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1643 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
1644 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
1645 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1646 ))) {
1647 call->dump();
1648 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1649 }
1650 #endif
1651 // Always process an arraycopy's destination object since
1652 // we need to add all possible edges to references in the
1653 // source object.
1654 if (arg_esc >= PointsToNode::ArgEscape &&
1655 !arg_is_arraycopy_dest) {
1656 continue;
1657 }
1658 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1659 if (call->is_ArrayCopy()) {
1684 }
1685 }
1686 }
1687 break;
1688 }
1689 case Op_CallStaticJava: {
1690 // For a static call, we know exactly what method is being called.
1691 // Use bytecode estimator to record the call's escape effects
1692 #ifdef ASSERT
1693 const char* name = call->as_CallStaticJava()->_name;
1694 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1695 #endif
1696 ciMethod* meth = call->as_CallJava()->method();
1697 if ((meth != nullptr) && meth->is_boxing_method()) {
1698 break; // Boxing methods do not modify any oops.
1699 }
1700 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1701 // fall-through if not a Java method or no analyzer information
1702 if (call_analyzer != nullptr) {
1703 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1704 const TypeTuple* d = call->tf()->domain_cc();
1705 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1706 const Type* at = d->field_at(i);
1707 int k = i - TypeFunc::Parms;
1708 Node* arg = call->in(i);
1709 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1710 if (at->isa_ptr() != nullptr &&
1711 call_analyzer->is_arg_returned(k)) {
1712 // The call returns arguments.
1713 if (call_ptn != nullptr) { // Is call's result used?
1714 assert(call_ptn->is_LocalVar(), "node should be registered");
1715 assert(arg_ptn != nullptr, "node should be registered");
1716 add_edge(call_ptn, arg_ptn);
1717 }
1718 }
1719 if (at->isa_oopptr() != nullptr &&
1720 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1721 if (!call_analyzer->is_arg_stack(k)) {
1722 // The argument global escapes
1723 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1724 } else {
1728 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1729 }
1730 }
1731 }
1732 }
1733 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1734 // The call returns arguments.
1735 assert(call_ptn->edge_count() > 0, "sanity");
1736 if (!call_analyzer->is_return_local()) {
1737 // It may also return an unknown object.
1738 add_edge(call_ptn, phantom_obj);
1739 }
1740 }
1741 break;
1742 }
1743 }
1744 default: {
1745 // Fall-through here if this is not a Java method, there is no analyzer
1746 // information, or it is some other type of call; assume the worst case:
1747 // all arguments globally escape.
1748 const TypeTuple* d = call->tf()->domain_cc();
1749 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1750 const Type* at = d->field_at(i);
1751 if (at->isa_oopptr() != nullptr) {
1752 Node* arg = call->in(i);
1753 if (arg->is_AddP()) {
1754 arg = get_addp_base(arg);
1755 }
1756 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1757 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1758 }
1759 }
1760 }
1761 }
1762 }
1763
1764
1765 // Finish Graph construction.
1766 bool ConnectionGraph::complete_connection_graph(
1767 GrowableArray<PointsToNode*>& ptnodes_worklist,
1768 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2141 PointsToNode* base = i.get();
2142 if (base->is_JavaObject()) {
2143 // Skip Allocate's fields which will be processed later.
2144 if (base->ideal_node()->is_Allocate()) {
2145 return 0;
2146 }
2147 assert(base == null_obj, "only null ptr base expected here");
2148 }
2149 }
2150 if (add_edge(field, phantom_obj)) {
2151 // New edge was added
2152 new_edges++;
2153 add_field_uses_to_worklist(field);
2154 }
2155 return new_edges;
2156 }
2157
2158 // Find fields initializing values for allocations.
2159 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2160 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2161 PointsToNode* init_val = phantom_obj;
2162 Node* alloc = pta->ideal_node();
2163
2164 // Do nothing for Allocate nodes since their field values are
2165 // "known" unless they are initialized by arraycopy/clone.
2166 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2167 if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2168 // Non-flat inline type arrays are initialized with
2169 // the default value instead of null. Handle them here.
2170 init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
2171 assert(init_val != nullptr, "default value should be registered");
2172 } else {
2173 return 0;
2174 }
2175 }
2176 // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
2177 assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2178 #ifdef ASSERT
2179 if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2180 const char* name = alloc->as_CallStaticJava()->_name;
2181 assert(strncmp(name, "_multianewarray", 15) == 0 ||
2182 strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
2183 }
2184 #endif
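// Illustrative example (hypothetical Java source): for
//   Object[][] a = new Object[n][m];  // lowered to _multianewarray
// the sub-array references stored by the runtime are invisible to EA,
// so each oop field of 'a' gets an edge to phantom_obj in the loop below.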
2186 int new_edges = 0;
2187 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2188 PointsToNode* field = i.get();
2189 if (field->is_Field() && field->as_Field()->is_oop()) {
2190 if (add_edge(field, init_val)) {
2191 // New edge was added
2192 new_edges++;
2193 add_field_uses_to_worklist(field->as_Field());
2194 }
2195 }
2196 }
2197 return new_edges;
2198 }
2199
2200 // Find fields initializing values for allocations.
2201 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2202 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2203 Node* alloc = pta->ideal_node();
2204 // Do nothing for Call nodes since their field values are unknown.
2205 if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2206 return 0;
2207 }
2208 InitializeNode* ini = alloc->as_Allocate()->initialization();
2209 bool visited_bottom_offset = false;
2210 GrowableArray<int> offsets_worklist;
2211 int new_edges = 0;
2212
2213 // Check if an oop field's initializing value is recorded and add
2214 // a corresponding null for the field's value if it is not recorded.
2215 // Connection Graph does not record a default initialization by null
2216 // captured by Initialize node.
2217 //
2218 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2219 PointsToNode* field = i.get(); // Field (AddP)
2220 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2221 continue; // Not oop field
2222 }
2223 int offset = field->as_Field()->offset();
2224 if (offset == Type::OffsetBot) {
2225 if (!visited_bottom_offset) {
2271 } else {
2272 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2273 tty->print_cr("----------init store has invalid value -----");
2274 store->dump();
2275 val->dump();
2276 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2277 }
2278 for (EdgeIterator j(val); j.has_next(); j.next()) {
2279 PointsToNode* obj = j.get();
2280 if (obj->is_JavaObject()) {
2281 if (!field->points_to(obj->as_JavaObject())) {
2282 missed_obj = obj;
2283 break;
2284 }
2285 }
2286 }
2287 }
2288 if (missed_obj != nullptr) {
2289 tty->print_cr("----------field---------------------------------");
2290 field->dump();
2291 tty->print_cr("----------missed reference to object------------");
2292 missed_obj->dump();
2293 tty->print_cr("----------object referenced by init store-------");
2294 store->dump();
2295 val->dump();
2296 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2297 }
2298 }
2299 #endif
2300 } else {
2301 // There could be initializing stores which follow allocation.
2302 // For example, a volatile field store is not collected
2303 // by the Initialize node.
2304 //
2305 // Need to check for dependent loads to separate such stores from
2306 // stores which follow loads. For now, add the initial value null so
2307 // that the pointer compare optimization works correctly.
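// Illustrative example (hypothetical Java source):
//   class A { volatile Object f; A(Object o) { f = o; } }
// The volatile store to 'f' is not captured by the Initialize node, so
// the field conservatively keeps the initial null value added below.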
2308 }
2309 }
2310 if (value == nullptr) {
2311 // A field's initializing value was not recorded. Add null.
2312 if (add_edge(field, null_obj)) {
2313 // New edge was added
2575 assert(field->edge_count() > 0, "sanity");
2576 }
2577 }
2578 }
2579 }
2580 #endif
2581
2582 // Optimize ideal graph.
2583 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2584 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2585 Compile* C = _compile;
2586 PhaseIterGVN* igvn = _igvn;
2587 if (EliminateLocks) {
2588 // Mark locks before changing ideal graph.
2589 int cnt = C->macro_count();
2590 for (int i = 0; i < cnt; i++) {
2591 Node *n = C->macro_node(i);
2592 if (n->is_AbstractLock()) { // Lock and Unlock nodes
2593 AbstractLockNode* alock = n->as_AbstractLock();
2594 if (!alock->is_non_esc_obj()) {
2595 const Type* obj_type = igvn->type(alock->obj_node());
2596 if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) {
2597 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2598 // The lock could be marked eliminated by the lock coarsening
2599 // code during the first IGVN before EA. Replace the coarsened flag
2600 // so that all associated locks/unlocks are eliminated.
2601 #ifdef ASSERT
2602 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2603 #endif
2604 alock->set_non_esc_obj();
2605 }
2606 }
2607 }
2608 }
2609 }
2610
2611 if (OptimizePtrCompare) {
2612 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2613 Node *n = ptr_cmp_worklist.at(i);
2614 const TypeInt* tcmp = optimize_ptr_compare(n);
2615 if (tcmp->singleton()) {
2616 Node* cmp = igvn->makecon(tcmp);
2617 #ifndef PRODUCT
2618 if (PrintOptimizePtrCompare) {
2619 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2620 if (Verbose) {
2621 n->dump(1);
2622 }
2623 }
2624 #endif
2625 igvn->replace_node(n, cmp);
2626 }
2627 }
2628 }
2629
2630 // For MemBarStoreStore nodes added in library_call.cpp, check
2631 // escape status of associated AllocateNode and optimize out
2632 // MemBarStoreStore node if the allocated object never escapes.
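// Sketch of the effect (assuming the shapes produced by library_call.cpp):
//   Allocate -> ... -> MemBarStoreStore
// becomes a plain MemBarCPUOrder when the object never escapes, or is
// removed entirely for non-escaping inline type buffer allocations.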
2633 for (int i = 0; i < storestore_worklist.length(); i++) {
2634 Node* storestore = storestore_worklist.at(i);
2635 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2636 if (alloc->is_Allocate() && not_global_escape(alloc)) {
2637 if (alloc->in(AllocateNode::InlineType) != nullptr) {
2638 // Non-escaping inline type buffer allocations don't require a membar
2639 storestore->as_MemBar()->remove(_igvn);
2640 } else {
2641 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2642 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
2643 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2644 igvn->register_new_node_with_optimizer(mb);
2645 igvn->replace_node(storestore, mb);
2646 }
2647 }
2648 }
2649 }
2650
2651 // Optimize object comparisons.
2652 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2653 assert(OptimizePtrCompare, "sanity");
2654 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2655 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2656 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2657 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]
2658
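// Worked example (sketch): if the two inputs are proven to be distinct
// non-escaping allocations, the compare folds to NE; if both inputs refer
// to the same unique non-escaping object, it folds to EQ; otherwise
// UNKNOWN is returned and the CmpP/CmpN is left alone.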
2659 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2660 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2661 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2662 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2663 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2664 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2665
2666 // Check simple cases first.
2780 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2781 assert((src != null_obj) && (dst != null_obj), "not for ConP null");
2782 PointsToNode* ptadr = _nodes.at(n->_idx);
2783 if (ptadr != nullptr) {
2784 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2785 return;
2786 }
2787 Compile* C = _compile;
2788 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2789 map_ideal_node(n, ptadr);
2790 // Add edge from arraycopy node to source object.
2791 (void)add_edge(ptadr, src);
2792 src->set_arraycopy_src();
2793 // Add edge from destination object to arraycopy node.
2794 (void)add_edge(dst, ptadr);
2795 dst->set_arraycopy_dst();
2796 }
2797
2798 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2799 const Type* adr_type = n->as_AddP()->bottom_type();
2800 int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2801 BasicType bt = T_INT;
2802 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2803 // Check only oop fields.
2804 if (!adr_type->isa_aryptr() ||
2805 adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2806 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
2807 // OffsetBot is used to reference array's element. Ignore first AddP.
2808 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
2809 bt = T_OBJECT;
2810 }
2811 }
2812 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2813 if (adr_type->isa_instptr()) {
2814 ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2815 if (field != nullptr) {
2816 bt = field->layout_type();
2817 } else {
2818 // Check for unsafe oop field access
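// (e.g., a hypothetical Unsafe.putReference(o, off, v): no ciField
// resolves at this offset, but the AddP has a StoreP/StoreN user)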
2819 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2820 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2821 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2822 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2823 bt = T_OBJECT;
2824 (*unsafe) = true;
2825 }
2826 }
2827 } else if (adr_type->isa_aryptr()) {
2828 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2829 // Ignore array length load.
2830 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
2831 // Ignore first AddP.
2832 } else {
2833 const Type* elemtype = adr_type->is_aryptr()->elem();
2834 if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
2835 ciInlineKlass* vk = elemtype->inline_klass();
2836 field_offset += vk->first_field_offset();
2837 bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2838 } else {
2839 bt = elemtype->array_element_basic_type();
2840 }
2841 }
2842 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2843 // Allocation initialization, ThreadLocal field access, unsafe access
2844 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2845 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2846 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2847 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2848 bt = T_OBJECT;
2849 }
2850 }
2851 }
2852 // Note: T_NARROWOOP is not classed as a real reference type
2853 return (is_reference_type(bt) || bt == T_NARROWOOP);
2854 }
2855
2856 // Returns the unique pointed-to Java object, or null.
2857 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
2858 // If the node was created after the escape computation we can't answer.
2859 uint idx = n->_idx;
2860 if (idx >= nodes_size()) {
3003 return true;
3004 }
3005 }
3006 }
3007 }
3008 }
3009 return false;
3010 }
3011
3012 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3013 const Type *adr_type = phase->type(adr);
3014 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3015 // We are computing a raw address for a store captured by an Initialize node;
3016 // compute an appropriate address type. AddP cases #3 and #5 (see below).
3017 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3018 assert(offs != Type::OffsetBot ||
3019 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3020 "offset must be a constant or it is initialization of array");
3021 return offs;
3022 }
3023 return adr_type->is_ptr()->flat_offset();
3024 }
3025
3026 Node* ConnectionGraph::get_addp_base(Node *addp) {
3027 assert(addp->is_AddP(), "must be AddP");
3028 //
3029 // AddP cases for Base and Address inputs:
3030 // case #1. Direct object's field reference:
3031 // Allocate
3032 // |
3033 // Proj #5 ( oop result )
3034 // |
3035 // CheckCastPP (cast to instance type)
3036 // | |
3037 // AddP ( base == address )
3038 //
3039 // case #2. Indirect object's field reference:
3040 // Phi
3041 // |
3042 // CastPP (cast to instance type)
3043 // | |
3157 }
3158 return nullptr;
3159 }
3160
3161 //
3162 // Adjust the type and inputs of an AddP which computes the
3163 // address of a field of an instance
3164 //
3165 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3166 PhaseGVN* igvn = _igvn;
3167 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3168 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3169 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3170 if (t == nullptr) {
3171 // We are computing a raw address for a store captured by an Initialize node;
3172 // compute an appropriate address type (cases #3 and #5).
3173 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3174 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3175 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3176 assert(offs != Type::OffsetBot, "offset must be a constant");
3177 if (base_t->isa_aryptr() != nullptr) {
3178 // In the case of a flat inline type array, each field has its
3179 // own slice so we need to extract the field being accessed from
3180 // the address computation
3181 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3182 } else {
3183 t = base_t->add_offset(offs)->is_oopptr();
3184 }
3185 }
3186 int inst_id = base_t->instance_id();
3187 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3188 "old type must be non-instance or match new type");
3189
3190 // The type 't' could be a subclass of 'base_t'.
3191 // As a result t->offset() could be larger than base_t's size and it will
3192 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3193 // constructor verifies correctness of the offset.
3194 //
3195 // It could happen on a subclass's branch (from the type profiling
3196 // inlining) which was not eliminated during parsing since the exactness
3197 // of the allocation type was not propagated to the subclass type check.
3198 //
3199 // Or the type 't' could be not related to 'base_t' at all.
3200 // It could happen when CHA type is different from MDO type on a dead path
3201 // (for example, from instanceof check) which is not collapsed during parsing.
3202 //
3203 // Do nothing for such AddP node and don't process its users since
3204 // this code branch will go away.
3205 //
3206 if (!t->is_known_instance() &&
3207 !base_t->maybe_java_subtype_of(t)) {
3208 return false; // bail out
3209 }
3210 const TypePtr* tinst = base_t->add_offset(t->offset());
3211 if (tinst->isa_aryptr() && t->isa_aryptr()) {
3212 // In the case of a flat inline type array, each field has its
3213 // own slice so we need to keep track of the field being accessed.
3214 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3215 // Keep array properties (not flat/null-free)
3216 tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3217 if (tinst == nullptr) {
3218 return false; // Skip dead path with inconsistent properties
3219 }
3220 }
3221
3222 // Do NOT remove the next line: ensure a new alias index is allocated
3223 // for the instance type. Note: C++ will not remove it since the call
3224 // has a side effect.
3225 int alias_idx = _compile->get_alias_index(tinst);
3226 igvn->set_type(addp, tinst);
3227 // record the allocation in the node map
3228 set_map(addp, get_map(base->_idx));
3229 // Set addp's Base and Address to 'base'.
3230 Node *abase = addp->in(AddPNode::Base);
3231 Node *adr = addp->in(AddPNode::Address);
3232 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3233 adr->in(0)->_idx == (uint)inst_id) {
3234 // Skip AddP cases #3 and #5.
3235 } else {
3236 assert(!abase->is_top(), "sanity"); // AddP case #3
3237 if (abase != base) {
3238 igvn->hash_delete(addp);
3239 addp->set_req(AddPNode::Base, base);
3240 if (abase == adr) {
3241 addp->set_req(AddPNode::Address, base);
3901 ptnode_adr(n->_idx)->dump();
3902 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3903 #endif
3904 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3905 return;
3906 } else {
3907 Node *val = get_map(jobj->idx()); // CheckCastPP node
3908 TypeNode *tn = n->as_Type();
3909 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3910 assert(tinst != nullptr && tinst->is_known_instance() &&
3911 tinst->instance_id() == jobj->idx(), "instance type expected.");
3912
3913 const Type *tn_type = igvn->type(tn);
3914 const TypeOopPtr *tn_t;
3915 if (tn_type->isa_narrowoop()) {
3916 tn_t = tn_type->make_ptr()->isa_oopptr();
3917 } else {
3918 tn_t = tn_type->isa_oopptr();
3919 }
3920 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
3921 if (tn_t->isa_aryptr()) {
3922 // Keep array properties (not flat/null-free)
3923 tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
3924 if (tinst == nullptr) {
3925 continue; // Skip dead path with inconsistent properties
3926 }
3927 }
3928 if (tn_type->isa_narrowoop()) {
3929 tn_type = tinst->make_narrowoop();
3930 } else {
3931 tn_type = tinst;
3932 }
3933 igvn->hash_delete(tn);
3934 igvn->set_type(tn, tn_type);
3935 tn->set_type(tn_type);
3936 igvn->hash_insert(tn);
3937 record_for_optimizer(n);
3938 } else {
3939 assert(tn_type == TypePtr::NULL_PTR ||
3940 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
3941 "unexpected type");
3942 continue; // Skip dead path with different type
3943 }
3944 }
3945 } else {
3946 debug_only(n->dump();)
3947 assert(false, "EA: unexpected node");
3948 continue;
3949 }
3950 // push allocation's users on appropriate worklist
3951 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3952 Node *use = n->fast_out(i);
3953 if (use->is_Mem() && use->in(MemNode::Address) == n) {
3954 // Load/store to instance's field
3955 memnode_worklist.append_if_missing(use);
3956 } else if (use->is_MemBar()) {
3957 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3958 memnode_worklist.append_if_missing(use);
3959 }
3960 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3961 Node* addp2 = find_second_addp(use, n);
3962 if (addp2 != nullptr) {
3963 alloc_worklist.append_if_missing(addp2);
3964 }
3965 alloc_worklist.append_if_missing(use);
3966 } else if (use->is_Phi() ||
3967 use->is_CheckCastPP() ||
3968 use->is_EncodeNarrowPtr() ||
3969 use->is_DecodeNarrowPtr() ||
3970 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3971 alloc_worklist.append_if_missing(use);
3972 #ifdef ASSERT
3973 } else if (use->is_Mem()) {
3974 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3975 } else if (use->is_MergeMem()) {
3976 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3977 } else if (use->is_SafePoint()) {
3978 // Look for MergeMem nodes for calls which reference unique allocation
3979 // (through CheckCastPP nodes) even for debug info.
3980 Node* m = use->in(TypeFunc::Memory);
3981 if (m->is_MergeMem()) {
3982 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3983 }
3984 } else if (use->Opcode() == Op_EncodeISOArray) {
3985 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3986 // EncodeISOArray overwrites destination array
3987 memnode_worklist.append_if_missing(use);
3988 }
3989 } else if (use->Opcode() == Op_Return) {
3990 // Allocation is referenced by field of returned inline type
3991 assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
3992 } else {
3993 uint op = use->Opcode();
3994 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3995 (use->in(MemNode::Memory) == n)) {
3996 // They overwrite the memory edge corresponding to the destination array.
3997 memnode_worklist.append_if_missing(use);
3998 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3999 op == Op_CastP2X || op == Op_StoreCM ||
4000 op == Op_FastLock || op == Op_AryEq ||
4001 op == Op_StrComp || op == Op_CountPositives ||
4002 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4003 op == Op_StrEquals || op == Op_VectorizedHashCode ||
4004 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4005 op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4006 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4007 n->dump();
4008 use->dump();
4009 assert(false, "EA: missing allocation reference path");
4010 }
4011 #endif
4012 }
4013 }
4014
4015 }
4016
4017 #ifdef ASSERT
4018 if (VerifyReduceAllocationMerges) {
4019 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints.
4020 for (uint i = 0; i < reducible_merges.size(); i++) {
4021 Node* phi = reducible_merges.at(i);
4022
4023 if (!reduced_merges.member(phi)) {
4024 phi->dump(2);
4025 phi->dump(-2);
4078 if (memnode_worklist.length() == 0)
4079 return; // nothing to do
4080 while (memnode_worklist.length() != 0) {
4081 Node *n = memnode_worklist.pop();
4082 if (visited.test_set(n->_idx)) {
4083 continue;
4084 }
4085 if (n->is_Phi() || n->is_ClearArray()) {
4086 // we don't need to do anything, but the users must be pushed
4087 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4088 // we don't need to do anything, but the users must be pushed
4089 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4090 if (n == nullptr) {
4091 continue;
4092 }
4093 } else if (n->Opcode() == Op_StrCompressedCopy ||
4094 n->Opcode() == Op_EncodeISOArray) {
4095 // get the memory projection
4096 n = n->find_out_with(Op_SCMemProj);
4097 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4098 } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4099 strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4100 n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4101 } else {
4102 assert(n->is_Mem(), "memory node required.");
4103 Node *addr = n->in(MemNode::Address);
4104 const Type *addr_t = igvn->type(addr);
4105 if (addr_t == Type::TOP) {
4106 continue;
4107 }
4108 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4109 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4110 assert ((uint)alias_idx < new_index_end, "wrong alias index");
4111 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4112 if (_compile->failing()) {
4113 return;
4114 }
4115 if (mem != n->in(MemNode::Memory)) {
4116 // We delay the memory edge update since we need the old one in
4117 // the MergeMem code below when instance memory slices are separated.
4118 set_map(n, mem);
4119 }
4120 if (n->is_Load()) {
4123 // get the memory projection
4124 n = n->find_out_with(Op_SCMemProj);
4125 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4126 }
4127 }
4128 // push user on appropriate worklist
4129 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4130 Node *use = n->fast_out(i);
4131 if (use->is_Phi() || use->is_ClearArray()) {
4132 memnode_worklist.append_if_missing(use);
4133 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4134 if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4135 continue;
4136 }
4137 memnode_worklist.append_if_missing(use);
4138 } else if (use->is_MemBar()) {
4139 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4140 memnode_worklist.append_if_missing(use);
4141 }
4142 #ifdef ASSERT
4143 } else if (use->is_Mem()) {
4144 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4145 } else if (use->is_MergeMem()) {
4146 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4147 } else if (use->Opcode() == Op_EncodeISOArray) {
4148 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4149 // EncodeISOArray overwrites destination array
4150 memnode_worklist.append_if_missing(use);
4151 }
4152 } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4153 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4154 // store_unknown_inline overwrites destination array
4155 memnode_worklist.append_if_missing(use);
4156 } else {
4157 uint op = use->Opcode();
4158 if ((use->in(MemNode::Memory) == n) &&
4159 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4160 // They overwrite the memory edge corresponding to the destination array.
4161 memnode_worklist.append_if_missing(use);
4162 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4163 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4164 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4165 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4166 n->dump();
4167 use->dump();
4168 assert(false, "EA: missing memory path");
4169 }
4170 #endif
4171 }
4172 }
4173 }
4174
4175 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4176 // Walk each memory slice moving the first node encountered of each
4177 // instance type to the input corresponding to its alias index.
4178 uint length = mergemem_worklist.length();
4179 for (uint next = 0; next < length; ++next) {
4180 MergeMemNode* nmm = mergemem_worklist.at(next);
4181 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4182 // Note: we don't want to use MergeMemStream here because we only want to
4183 // scan inputs which exist at the start, not ones we add during processing.
4184 // Note 2: MergeMem may already contain instance memory slices added
4185 // during find_inst_mem() call when memory nodes were processed above.
4232 Node* result = step_through_mergemem(nmm, ni, tinst);
4233 if (result == nmm->base_memory()) {
4234 // Didn't find instance memory, search through general slice recursively.
4235 result = nmm->memory_at(_compile->get_general_index(ni));
4236 result = find_inst_mem(result, ni, orig_phis);
4237 if (_compile->failing()) {
4238 return;
4239 }
4240 nmm->set_memory_at(ni, result);
4241 }
4242 }
4243 igvn->hash_insert(nmm);
4244 record_for_optimizer(nmm);
4245 }
4246
4247 // Phase 4: Update the inputs of non-instance memory Phis and
4248 // the Memory input of memnodes
4249 // First update the inputs of any non-instance Phi's from
4250 // which we split out an instance Phi. Note we don't have
4251 // to recursively process Phi's encountered on the input memory
4252 // chains as is done in split_memory_phi() since they will
4253 // also be processed here.
4254 for (int j = 0; j < orig_phis.length(); j++) {
4255 PhiNode *phi = orig_phis.at(j);
4256 int alias_idx = _compile->get_alias_index(phi->adr_type());
4257 igvn->hash_delete(phi);
4258 for (uint i = 1; i < phi->req(); i++) {
4259 Node *mem = phi->in(i);
4260 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4261 if (_compile->failing()) {
4262 return;
4263 }
4264 if (mem != new_mem) {
4265 phi->set_req(i, new_mem);
4266 }
4267 }
4268 igvn->hash_insert(phi);
4269 record_for_optimizer(phi);
4270 }
4271
4272 // Update the memory inputs of MemNodes with the value we computed