12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/c2/barrierSetC2.hpp"
30 #include "libadt/vectset.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "opto/c2compiler.hpp"
34 #include "opto/arraycopynode.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/cfgnode.hpp"
37 #include "opto/compile.hpp"
38 #include "opto/escape.hpp"
39 #include "opto/macro.hpp"
40 #include "opto/phaseX.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/rootnode.hpp"
43 #include "utilities/macros.hpp"
44
45 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
46 // If ReduceAllocationMerges is enabled we might call split_through_phi during
47 // split_unique_types and that will create additional nodes that need to be
48 // pushed to the ConnectionGraph. The code below bumps the initial capacity of
49 // _nodes by 10% to account for these additional nodes. If capacity is exceeded
50 // the array will be reallocated.
51 _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
144 GrowableArray<SafePointNode*> sfn_worklist;
145 GrowableArray<MergeMemNode*> mergemem_worklist;
146 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
147
148 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
149
150 // 1. Populate Connection Graph (CG) with PointsTo nodes.
151 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
152 // Initialize worklist
153 if (C->root() != nullptr) {
154 ideal_nodes.push(C->root());
155 }
156   // Processed ideal nodes are unique on the ideal_nodes list,
157   // but several ideal nodes may be mapped to the same phantom_obj.
158   // To avoid duplicate entries on the following worklists,
159   // add the phantom_obj to them only once.
160 ptnodes_worklist.append(phantom_obj);
161 java_objects_worklist.append(phantom_obj);
162   for (uint next = 0; next < ideal_nodes.size(); ++next) {
163 Node* n = ideal_nodes.at(next);
164     // Create PointsTo nodes and add them to the Connection Graph. Called
165     // only once per ideal node since ideal_nodes is a Unique_Node_List.
166 add_node_to_connection_graph(n, &delayed_worklist);
167 PointsToNode* ptn = ptnode_adr(n->_idx);
168 if (ptn != nullptr && ptn != phantom_obj) {
169 ptnodes_worklist.append(ptn);
170 if (ptn->is_JavaObject()) {
171 java_objects_worklist.append(ptn->as_JavaObject());
172 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
173 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
174         // Only the results of allocations and Java static calls are interesting.
175 non_escaped_allocs_worklist.append(ptn->as_JavaObject());
176 }
177 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
178 oop_fields_worklist.append(ptn->as_Field());
179 }
180 }
181 // Collect some interesting nodes for further use.
182 switch (n->Opcode()) {
183 case Op_MergeMem:
646
647   // The next two inputs are:
648   // (1) A copy of the original pointer to NSR (not scalar replaceable) objects.
649   // (2) A selector, used to decide if we need to rematerialize an object
650   //     or use the pointer to a NSR object.
651   // See the declaration of SafePointScalarMergeNode for more details on these fields.
652 sfpt->add_req(ophi);
653 sfpt->add_req(selector);
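  // Illustrative shape after this block and the loop below (assuming the
  // field layout described in SafePointScalarMergeNode):
  //   sfpt debug info: ..., smerge, ..., ophi, selector
  //   smerge inputs:   one SafePointScalarObjectNode per scalar-replaceable
  //                    input of ophi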
654
655 for (uint i = 1; i < ophi->req(); i++) {
656 Node* base = ophi->in(i);
657 JavaObjectNode* ptn = unique_java_object(base);
658
659 // If the base is not scalar replaceable we don't need to register information about
660 // it at this time.
661 if (ptn == nullptr || !ptn->scalar_replaceable()) {
662 continue;
663 }
664
665 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
666 SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
667 if (sobj == nullptr) {
668 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
669 return;
670 }
671
672 // Now make a pass over the debug information replacing any references
673 // to the allocated object with "sobj"
674 Node* ccpp = alloc->result_cast();
675 sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
676
677 // Register the scalarized object as a candidate for reallocation
678 smerge->add_req(sobj);
679 }
680
681 // Replaces debug information references to "ophi" in "sfpt" with references to "smerge"
682 sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);
683
684   // The call to 'replace_edges_in_range' above might have removed the
685   // reference to ophi that we need at _merge_pointer_idx. The line below
686   // makes sure the reference is maintained.
810 return false;
811 }
812
813 // Returns true if at least one of the arguments to the call is an object
814 // that does not escape globally.
815 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
816 if (call->method() != nullptr) {
817 uint max_idx = TypeFunc::Parms + call->method()->arg_size();
818 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
819 Node* p = call->in(idx);
820 if (not_global_escape(p)) {
821 return true;
822 }
823 }
824 } else {
825 const char* name = call->as_CallStaticJava()->_name;
826 assert(name != nullptr, "no name");
827 // no arg escapes through uncommon traps
828 if (strcmp(name, "uncommon_trap") != 0) {
829 // process_call_arguments() assumes that all arguments escape globally
830 const TypeTuple* d = call->tf()->domain();
831 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
832 const Type* at = d->field_at(i);
833 if (at->isa_oopptr() != nullptr) {
834 return true;
835 }
836 }
837 }
838 }
839 return false;
840 }
841
842
843
844 // Utility function for nodes that load an object
845 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
846 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
847 // ThreadLocal has RawPtr type.
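  // (For example, Thread::current() is modeled by a ThreadLocalNode whose
  // bottom type is TypeRawPtr::BOTTOM; make_ptr() lets such raw pointers
  // through while isa_oopptr() would reject them.)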
848 const Type* t = _igvn->type(n);
849 if (t->make_ptr() != nullptr) {
850 Node* adr = n->in(MemNode::Address);
884 // first IGVN optimization when escape information is still available.
885 record_for_optimizer(n);
886 } else if (n->is_Allocate()) {
887 add_call_node(n->as_Call());
888 record_for_optimizer(n);
889 } else {
890 if (n->is_CallStaticJava()) {
891 const char* name = n->as_CallStaticJava()->_name;
892 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
893 return; // Skip uncommon traps
894 }
895 }
896 // Don't mark as processed since call's arguments have to be processed.
897 delayed_worklist->push(n);
898 // Check if a call returns an object.
899 if ((n->as_Call()->returns_pointer() &&
900 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
901 (n->is_CallStaticJava() &&
902 n->as_CallStaticJava()->is_boxing_method())) {
903 add_call_node(n->as_Call());
904 }
905 }
906 return;
907 }
908 // Put this check here to process call arguments since some call nodes
909 // point to phantom_obj.
910 if (n_ptn == phantom_obj || n_ptn == null_obj) {
911 return; // Skip predefined nodes.
912 }
913 switch (opcode) {
914 case Op_AddP: {
915 Node* base = get_addp_base(n);
916 PointsToNode* ptn_base = ptnode_adr(base->_idx);
917 // Field nodes are created for all field types. They are used in
918 // adjust_scalar_replaceable_state() and split_unique_types().
919 // Note, non-oop fields will have only base edges in Connection
920 // Graph because such fields are not used for oop loads and stores.
921 int offset = address_offset(n, igvn);
922 add_field(n, PointsToNode::NoEscape, offset);
923 if (ptn_base == nullptr) {
924 delayed_worklist->push(n); // Process it later.
925 } else {
926 n_ptn = ptnode_adr(n_idx);
927 add_base(n_ptn->as_Field(), ptn_base);
928 }
929 break;
930 }
931 case Op_CastX2P: {
932 map_ideal_node(n, phantom_obj);
933 break;
934 }
935 case Op_CastPP:
936 case Op_CheckCastPP:
937 case Op_EncodeP:
938 case Op_DecodeN:
939 case Op_EncodePKlass:
940 case Op_DecodeNKlass: {
941 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
942 break;
943 }
944 case Op_CMoveP: {
945 add_local_var(n, PointsToNode::NoEscape);
946       // Do not add edges during the first iteration because some of the
947       // inputs may not be defined yet.
948 delayed_worklist->push(n);
949 break;
950 }
951 case Op_ConP:
952 case Op_ConN:
953 case Op_ConNKlass: {
954 // assume all oop constants globally escape except for null
986 case Op_PartialSubtypeCheck: {
987       // Produces Null or notNull and is used only in CmpP, so
988       // phantom_obj can be used.
989 map_ideal_node(n, phantom_obj); // Result is unknown
990 break;
991 }
992 case Op_Phi: {
993 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
994 // ThreadLocal has RawPtr type.
995 const Type* t = n->as_Phi()->type();
996 if (t->make_ptr() != nullptr) {
997 add_local_var(n, PointsToNode::NoEscape);
998         // Do not add edges during the first iteration because some of the
999         // inputs may not be defined yet.
1000 delayed_worklist->push(n);
1001 }
1002 break;
1003 }
1004 case Op_Proj: {
1005 // we are only interested in the oop result projection from a call
1006 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1007 n->in(0)->as_Call()->returns_pointer()) {
1008 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1009 }
1010 break;
1011 }
1012 case Op_Rethrow: // Exception object escapes
1013 case Op_Return: {
1014 if (n->req() > TypeFunc::Parms &&
1015 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1016 // Treat Return value as LocalVar with GlobalEscape escape state.
1017 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1018 }
1019 break;
1020 }
1021 case Op_CompareAndExchangeP:
1022 case Op_CompareAndExchangeN:
1023 case Op_GetAndSetP:
1024 case Op_GetAndSetN: {
1025 add_objload_to_connection_graph(n, delayed_worklist);
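      // These intrinsics both read and write the location: the old value they
      // return is modeled as an object load above, while the value they store
      // is handled by the CompareAndSwap cases reached by the fall-through.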
1026 // fall-through
1027 }
1089 if (n->is_Call()) {
1090 process_call_arguments(n->as_Call());
1091 return;
1092 }
1093 assert(n->is_Store() || n->is_LoadStore() ||
1094          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1095 "node should be registered already");
1096 int opcode = n->Opcode();
1097 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1098 if (gc_handled) {
1099 return; // Ignore node if already handled by GC.
1100 }
1101 switch (opcode) {
1102 case Op_AddP: {
1103 Node* base = get_addp_base(n);
1104 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1105 assert(ptn_base != nullptr, "field's base should be registered");
1106 add_base(n_ptn->as_Field(), ptn_base);
1107 break;
1108 }
1109 case Op_CastPP:
1110 case Op_CheckCastPP:
1111 case Op_EncodeP:
1112 case Op_DecodeN:
1113 case Op_EncodePKlass:
1114 case Op_DecodeNKlass: {
1115 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1116 break;
1117 }
1118 case Op_CMoveP: {
1119 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1120 Node* in = n->in(i);
1121 if (in == nullptr) {
1122 continue; // ignore null
1123 }
1124 Node* uncast_in = in->uncast();
1125 if (uncast_in->is_top() || uncast_in == n) {
1126         continue; // ignore top or inputs which go back to this node
1127 }
1128 PointsToNode* ptn = ptnode_adr(in->_idx);
1143 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1144 // ThreadLocal has RawPtr type.
1145 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1146 for (uint i = 1; i < n->req(); i++) {
1147 Node* in = n->in(i);
1148 if (in == nullptr) {
1149 continue; // ignore null
1150 }
1151 Node* uncast_in = in->uncast();
1152 if (uncast_in->is_top() || uncast_in == n) {
1153         continue; // ignore top or inputs which go back to this node
1154 }
1155 PointsToNode* ptn = ptnode_adr(in->_idx);
1156 assert(ptn != nullptr, "node should be registered");
1157 add_edge(n_ptn, ptn);
1158 }
1159 break;
1160 }
1161 case Op_Proj: {
1162 // we are only interested in the oop result projection from a call
1163 assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1164 n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1165 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1166 break;
1167 }
1168 case Op_Rethrow: // Exception object escapes
1169 case Op_Return: {
1170 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1171 "Unexpected node type");
1172 // Treat Return value as LocalVar with GlobalEscape escape state.
1173 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1174 break;
1175 }
1176 case Op_CompareAndExchangeP:
1177 case Op_CompareAndExchangeN:
1178 case Op_GetAndSetP:
1179    case Op_GetAndSetN: {
1180 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1181 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1182 // fall-through
1183 }
1184 case Op_CompareAndSwapP:
1320 PointsToNode* ptn = ptnode_adr(val->_idx);
1321 assert(ptn != nullptr, "node should be registered");
1322 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1323 // Add edge to object for unsafe access with offset.
1324 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1325 assert(adr_ptn != nullptr, "node should be registered");
1326 if (adr_ptn->is_Field()) {
1327 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1328 add_edge(adr_ptn, ptn);
1329 }
1330 return true;
1331 }
1332 #ifdef ASSERT
1333 n->dump(1);
1334 assert(false, "not unsafe");
1335 #endif
1336 return false;
1337 }
1338
1339 void ConnectionGraph::add_call_node(CallNode* call) {
1340 assert(call->returns_pointer(), "only for call which returns pointer");
1341 uint call_idx = call->_idx;
1342 if (call->is_Allocate()) {
1343 Node* k = call->in(AllocateNode::KlassNode);
1344 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1345 assert(kt != nullptr, "TypeKlassPtr required.");
1346 PointsToNode::EscapeState es = PointsToNode::NoEscape;
1347 bool scalar_replaceable = true;
1348 NOT_PRODUCT(const char* nsr_reason = "");
1349 if (call->is_AllocateArray()) {
1350 if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1351 es = PointsToNode::GlobalEscape;
1352 } else {
1353 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1354 if (length < 0) {
1355 // Not scalar replaceable if the length is not constant.
1356 scalar_replaceable = false;
1357 NOT_PRODUCT(nsr_reason = "has a non-constant length");
1358 } else if (length > EliminateAllocationArraySizeLimit) {
1359 // Not scalar replaceable if the length is too big.
1360 scalar_replaceable = false;
1396 //
1397 //   - all oop arguments escape globally;
1398 //
1399 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1400 //
1401 //   - the same as CallDynamicJavaNode if bytecode analysis can't be done;
1402 //
1403 //   - mapped to GlobalEscape JavaObject node if an unknown oop is returned;
1404 //   - mapped to NoEscape JavaObject node if a non-escaping object allocated
1405 //     during the call is returned;
1406 //   - mapped to ArgEscape LocalVar node pointing to object arguments
1407 //     which are returned and do not escape during the call;
1408 //
1409 //   - the escape status of oop arguments is determined by bytecode analysis;
1410 //
1411 // For a static call, we know exactly what method is being called.
1412 // Use bytecode estimator to record whether the call's return value escapes.
1413 ciMethod* meth = call->as_CallJava()->method();
1414 if (meth == nullptr) {
1415 const char* name = call->as_CallStaticJava()->_name;
1416 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
1417 // Returns a newly allocated non-escaped object.
1418 add_java_object(call, PointsToNode::NoEscape);
1419 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
1420 } else if (meth->is_boxing_method()) {
1421 // Returns boxing object
1422 PointsToNode::EscapeState es;
1423 vmIntrinsics::ID intr = meth->intrinsic_id();
1424 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1425        // It does not escape if the object is always freshly allocated.
1426 es = PointsToNode::NoEscape;
1427 } else {
1428        // It escapes globally if the object could be loaded from the boxing cache.
1429 es = PointsToNode::GlobalEscape;
1430 }
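      // (For example, Integer.valueOf(42) may return a preallocated instance
      // from java.lang.Integer's cache, an object that already escaped
      // globally, whereas Double.valueOf() always allocates a fresh object.)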
1431 add_java_object(call, es);
1432 if (es == PointsToNode::GlobalEscape) {
1433 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
1434 }
1435 } else {
1436 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1437 call_analyzer->copy_dependencies(_compile->dependencies());
1438 if (call_analyzer->is_return_allocated()) {
1439 // Returns a newly allocated non-escaped object, simply
1440 // update dependency information.
1441 // Mark it as NoEscape so that objects referenced by
1442        // its fields will be marked as NoEscape at least.
1443 add_java_object(call, PointsToNode::NoEscape);
1444 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1445 } else {
1446 // Determine whether any arguments are returned.
1447 const TypeTuple* d = call->tf()->domain();
1448 bool ret_arg = false;
1449 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1450 if (d->field_at(i)->isa_ptr() != nullptr &&
1451 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1452 ret_arg = true;
1453 break;
1454 }
1455 }
1456 if (ret_arg) {
1457 add_local_var(call, PointsToNode::ArgEscape);
1458 } else {
1459 // Returns unknown object.
1460 map_ideal_node(call, phantom_obj);
1461 }
1462 }
1463 }
1464 } else {
1465    // Another type of call; assume the worst case:
1466    // the returned value is unknown and escapes globally.
1467 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
1475 #ifdef ASSERT
1476 case Op_Allocate:
1477 case Op_AllocateArray:
1478 case Op_Lock:
1479 case Op_Unlock:
1480 assert(false, "should be done already");
1481 break;
1482 #endif
1483 case Op_ArrayCopy:
1484 case Op_CallLeafNoFP:
1485 // Most array copies are ArrayCopy nodes at this point but there
1486 // are still a few direct calls to the copy subroutines (See
1487 // PhaseStringOpts::copy_string())
1488 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1489 call->as_CallLeaf()->is_call_to_arraycopystub();
1490 // fall through
1491 case Op_CallLeafVector:
1492 case Op_CallLeaf: {
1493      // For stub calls, objects do not escape but they are not scalar replaceable.
1494 // Adjust escape state for outgoing arguments.
1495 const TypeTuple * d = call->tf()->domain();
1496 bool src_has_oops = false;
1497 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1498 const Type* at = d->field_at(i);
1499 Node *arg = call->in(i);
1500 if (arg == nullptr) {
1501 continue;
1502 }
1503 const Type *aat = _igvn->type(arg);
1504 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1505 continue;
1506 }
1507 if (arg->is_AddP()) {
1508          //
1509          // The inline_native_clone() case, when the arraycopy stub is called
1510          // after the allocation but before the Initialize and CheckCastPP nodes.
1511          // Or the normal arraycopy-for-object-arrays case.
1512          //
1513          // Set the AddP's base (Allocate) as not scalar replaceable, since a
1514          // pointer to the base (with offset) is passed as an argument.
1515          //
1516 arg = get_addp_base(arg);
1517 }
1518 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1519 assert(arg_ptn != nullptr, "should be registered");
1520 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1521 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1522 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1523                 aat->isa_ptr() != nullptr, "expecting a Ptr");
1524 bool arg_has_oops = aat->isa_oopptr() &&
1525 (aat->isa_instptr() ||
1526 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
1527 if (i == TypeFunc::Parms) {
1528 src_has_oops = arg_has_oops;
1529 }
1530 //
1531          // src or dst could be j.l.Object when the other is a basic type array:
1532 //
1533 // arraycopy(char[],0,Object*,0,size);
1534 // arraycopy(Object*,0,char[],0,size);
1535 //
1536 // Don't add edges in such cases.
1537 //
1538 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1539 arg_has_oops && (i > TypeFunc::Parms);
1540 #ifdef ASSERT
1541 if (!(is_arraycopy ||
1542 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1543 (call->as_CallLeaf()->_name != nullptr &&
1544 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1545 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1546 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1555 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1556 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1557 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1558 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1559 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1560 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1561 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1562 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1563 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1564 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1565 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1566 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1567 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1568 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1569 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1570 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1571 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1572 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1573 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1574 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1575 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1576 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1577 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1578 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1579 ))) {
1580 call->dump();
1581 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1582 }
1583 #endif
1584          // Always process an arraycopy's destination object, since
1585          // we need to add all possible edges to references in the
1586          // source object.
1587 if (arg_esc >= PointsToNode::ArgEscape &&
1588 !arg_is_arraycopy_dest) {
1589 continue;
1590 }
1591 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1592 if (call->is_ArrayCopy()) {
1593 ArrayCopyNode* ac = call->as_ArrayCopy();
1594 if (ac->is_clonebasic() ||
1617 }
1618 }
1619 }
1620 break;
1621 }
1622 case Op_CallStaticJava: {
1623 // For a static call, we know exactly what method is being called.
1624      // Use bytecode estimator to record the call's escape effects
1625 #ifdef ASSERT
1626 const char* name = call->as_CallStaticJava()->_name;
1627 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1628 #endif
1629 ciMethod* meth = call->as_CallJava()->method();
1630 if ((meth != nullptr) && meth->is_boxing_method()) {
1631 break; // Boxing methods do not modify any oops.
1632 }
1633      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1634 // fall-through if not a Java method or no analyzer information
1635 if (call_analyzer != nullptr) {
1636 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1637 const TypeTuple* d = call->tf()->domain();
1638 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1639 const Type* at = d->field_at(i);
1640 int k = i - TypeFunc::Parms;
1641 Node* arg = call->in(i);
1642 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1643 if (at->isa_ptr() != nullptr &&
1644 call_analyzer->is_arg_returned(k)) {
1645 // The call returns arguments.
1646 if (call_ptn != nullptr) { // Is call's result used?
1647 assert(call_ptn->is_LocalVar(), "node should be registered");
1648 assert(arg_ptn != nullptr, "node should be registered");
1649 add_edge(call_ptn, arg_ptn);
1650 }
1651 }
1652 if (at->isa_oopptr() != nullptr &&
1653 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1654 if (!call_analyzer->is_arg_stack(k)) {
1655              // The argument escapes globally
1656 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1657 } else {
1661 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1662 }
1663 }
1664 }
1665 }
1666 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1667 // The call returns arguments.
1668 assert(call_ptn->edge_count() > 0, "sanity");
1669 if (!call_analyzer->is_return_local()) {
1670            // It may also return an unknown object.
1671 add_edge(call_ptn, phantom_obj);
1672 }
1673 }
1674 break;
1675 }
1676 }
1677 default: {
1678      // We fall through to here if this is not a Java method, there is no
1679      // analyzer information, or it is some other type of call; assume the
1680      // worst case: all arguments escape globally.
1681 const TypeTuple* d = call->tf()->domain();
1682 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1683 const Type* at = d->field_at(i);
1684 if (at->isa_oopptr() != nullptr) {
1685 Node* arg = call->in(i);
1686 if (arg->is_AddP()) {
1687 arg = get_addp_base(arg);
1688 }
1689 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1690 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1691 }
1692 }
1693 }
1694 }
1695 }
1696
1697
1698 // Finish Graph construction.
1699 bool ConnectionGraph::complete_connection_graph(
1700 GrowableArray<PointsToNode*>& ptnodes_worklist,
1701 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2074 PointsToNode* base = i.get();
2075 if (base->is_JavaObject()) {
2076 // Skip Allocate's fields which will be processed later.
2077 if (base->ideal_node()->is_Allocate()) {
2078 return 0;
2079 }
2080 assert(base == null_obj, "only null ptr base expected here");
2081 }
2082 }
2083 if (add_edge(field, phantom_obj)) {
2084 // New edge was added
2085 new_edges++;
2086 add_field_uses_to_worklist(field);
2087 }
2088 return new_edges;
2089 }
2090
2091 // Find fields initializing values for allocations.
2092 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2093 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2094 Node* alloc = pta->ideal_node();
2095
2096  // Do nothing for Allocate nodes since their field values are
2097  // "known" unless they are initialized by arraycopy/clone.
2098 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2099 return 0;
2100 }
2101 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2102 #ifdef ASSERT
2103 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2104 const char* name = alloc->as_CallStaticJava()->_name;
2105 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
2106 }
2107 #endif
2108  // Non-escaped allocations returned from Java or runtime calls have unknown field values.
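  // (For example, a non-inlined factory call may have stored arbitrary
  // objects into the returned instance's fields; model each oop field as
  // pointing to phantom_obj.)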
2109 int new_edges = 0;
2110 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2111 PointsToNode* field = i.get();
2112 if (field->is_Field() && field->as_Field()->is_oop()) {
2113 if (add_edge(field, phantom_obj)) {
2114 // New edge was added
2115 new_edges++;
2116 add_field_uses_to_worklist(field->as_Field());
2117 }
2118 }
2119 }
2120 return new_edges;
2121 }
2122
2123 // Find fields initializing values for allocations.
2124 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2125 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2126 Node* alloc = pta->ideal_node();
2127  // Do nothing for Call nodes since their field values are unknown.
2128 if (!alloc->is_Allocate()) {
2129 return 0;
2130 }
2131 InitializeNode* ini = alloc->as_Allocate()->initialization();
2132 bool visited_bottom_offset = false;
2133 GrowableArray<int> offsets_worklist;
2134 int new_edges = 0;
2135
2136  // Check if an oop field's initializing value is recorded and add
2137  // a corresponding null value for the field if it is not recorded.
2138  // The Connection Graph does not record a default initialization by null
2139  // that is captured by the Initialize node.
2140  //
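  // For example, after "p = new Point()" with no captured store to p.next,
  // the field still holds its default null; without the null edge added
  // below, a later pointer compare against p.next could fold incorrectly.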
2141 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2142 PointsToNode* field = i.get(); // Field (AddP)
2143 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2144 continue; // Not oop field
2145 }
2146 int offset = field->as_Field()->offset();
2147 if (offset == Type::OffsetBot) {
2148 if (!visited_bottom_offset) {
2194 } else {
2195 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2196 tty->print_cr("----------init store has invalid value -----");
2197 store->dump();
2198 val->dump();
2199 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2200 }
2201 for (EdgeIterator j(val); j.has_next(); j.next()) {
2202 PointsToNode* obj = j.get();
2203 if (obj->is_JavaObject()) {
2204 if (!field->points_to(obj->as_JavaObject())) {
2205 missed_obj = obj;
2206 break;
2207 }
2208 }
2209 }
2210 }
2211 if (missed_obj != nullptr) {
2212 tty->print_cr("----------field---------------------------------");
2213 field->dump();
2214          tty->print_cr("----------missed reference to object-----------");
2215 missed_obj->dump();
2216          tty->print_cr("----------object referenced by init store -----");
2217 store->dump();
2218 val->dump();
2219 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2220 }
2221 }
2222 #endif
2223 } else {
2224        // There could be initializing stores which follow the allocation.
2225        // For example, a volatile field store is not collected
2226        // by the Initialize node.
2227        //
2228        // We would need to check for dependent loads to separate such stores from
2229        // stores which follow loads. For now, add the initial value null so
2230        // that the pointer-compare optimization works correctly.
2231 }
2232 }
2233 if (value == nullptr) {
2234 // A field's initializing value was not recorded. Add null.
2235 if (add_edge(field, null_obj)) {
2236 // New edge was added
2498 assert(field->edge_count() > 0, "sanity");
2499 }
2500 }
2501 }
2502 }
2503 #endif
2504
2505 // Optimize ideal graph.
2506 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2507 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2508 Compile* C = _compile;
2509 PhaseIterGVN* igvn = _igvn;
2510 if (EliminateLocks) {
2511 // Mark locks before changing ideal graph.
2512 int cnt = C->macro_count();
2513 for (int i = 0; i < cnt; i++) {
2514 Node *n = C->macro_node(i);
2515 if (n->is_AbstractLock()) { // Lock and Unlock nodes
2516 AbstractLockNode* alock = n->as_AbstractLock();
2517 if (!alock->is_non_esc_obj()) {
2518 if (not_global_escape(alock->obj_node())) {
2519 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2520            // The lock could have been marked eliminated by the lock coarsening
2521            // code during the first IGVN before EA. Override the coarsened flag
2522            // so that all associated locks/unlocks are eliminated.
2523 #ifdef ASSERT
2524 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2525 #endif
2526 alock->set_non_esc_obj();
2527 }
2528 }
2529 }
2530 }
2531 }
2532
2533 if (OptimizePtrCompare) {
2534 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2535 Node *n = ptr_cmp_worklist.at(i);
2536 const TypeInt* tcmp = optimize_ptr_compare(n);
2537 if (tcmp->singleton()) {
2538 Node* cmp = igvn->makecon(tcmp);
2539 #ifndef PRODUCT
2540 if (PrintOptimizePtrCompare) {
2541 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2542 if (Verbose) {
2543 n->dump(1);
2544 }
2545 }
2546 #endif
2547 igvn->replace_node(n, cmp);
2548 }
2549 }
2550 }
2551
2552 // For MemBarStoreStore nodes added in library_call.cpp, check
2553 // escape status of associated AllocateNode and optimize out
2554 // MemBarStoreStore node if the allocated object never escapes.
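  // A non-escaping object can never be observed by another thread, so the
  // publication fence is unnecessary; the MemBarCPUOrder used below merely
  // keeps the memory graph well-formed.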
2555 for (int i = 0; i < storestore_worklist.length(); i++) {
2556 Node* storestore = storestore_worklist.at(i);
2557 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2558 if (alloc->is_Allocate() && not_global_escape(alloc)) {
2559 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2560 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
2561 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2562 igvn->register_new_node_with_optimizer(mb);
2563 igvn->replace_node(storestore, mb);
2564 }
2565 }
2566 }
2567
2568 // Optimize object pointer compares.
2569 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2570 assert(OptimizePtrCompare, "sanity");
2571 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2572 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2573 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2574 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]
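  // For example, a compare of two distinct non-escaping allocations folds
  // to NE, while a compare involving a value that may alias an unknown
  // object stays UNKNOWN.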
2575
2576 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2577 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2578 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2579 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2580 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2581 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2582
2583 // Check simple cases first.
2697 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2698 assert((src != null_obj) && (dst != null_obj), "not for ConP null");
2699 PointsToNode* ptadr = _nodes.at(n->_idx);
2700 if (ptadr != nullptr) {
2701 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2702 return;
2703 }
2704 Compile* C = _compile;
2705 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2706 map_ideal_node(n, ptadr);
2707 // Add edge from arraycopy node to source object.
2708 (void)add_edge(ptadr, src);
2709 src->set_arraycopy_src();
2710 // Add edge from destination object to arraycopy node.
2711 (void)add_edge(dst, ptadr);
2712 dst->set_arraycopy_dst();
2713 }
2714
2715 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2716 const Type* adr_type = n->as_AddP()->bottom_type();
2717 BasicType bt = T_INT;
2718 if (offset == Type::OffsetBot) {
2719 // Check only oop fields.
2720 if (!adr_type->isa_aryptr() ||
2721 adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2722 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
2723      // OffsetBot is used to reference an array's elements. Ignore the first AddP.
2724 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
2725 bt = T_OBJECT;
2726 }
2727 }
2728 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2729 if (adr_type->isa_instptr()) {
2730 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2731 if (field != nullptr) {
2732 bt = field->layout_type();
2733 } else {
2734 // Check for unsafe oop field access
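        // (typical for Unsafe.getReference/putReference with a computed
        // offset that does not correspond to any declared field)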
2735 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2736 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2737 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2738 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2739 bt = T_OBJECT;
2740 (*unsafe) = true;
2741 }
2742 }
2743 } else if (adr_type->isa_aryptr()) {
2744 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2745 // Ignore array length load.
2746 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
2747 // Ignore first AddP.
2748 } else {
2749 const Type* elemtype = adr_type->isa_aryptr()->elem();
2750 bt = elemtype->array_element_basic_type();
2751 }
2752 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2753 // Allocation initialization, ThreadLocal field access, unsafe access
2754 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2755 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2756 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2757 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2758 bt = T_OBJECT;
2759 }
2760 }
2761 }
2762 // Note: T_NARROWOOP is not classed as a real reference type
2763 return (is_reference_type(bt) || bt == T_NARROWOOP);
2764 }
2765
2766 // Returns the unique pointed-to Java object, or null.
2767 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
2768 // If the node was created after the escape computation we can't answer.
2769 uint idx = n->_idx;
2770 if (idx >= nodes_size()) {
2913 return true;
2914 }
2915 }
2916 }
2917 }
2918 }
2919 return false;
2920 }
2921
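// Returns the constant offset encoded in 'adr', i.e. a field's byte offset
// within its object; Type::OffsetBot denotes an unknown or varying offset,
// e.g. an array element addressed by a non-constant index.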
2922 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
2923 const Type *adr_type = phase->type(adr);
2924 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
2925    // We are computing a raw address for a store captured by an Initialize;
2926    // compute an appropriate address type. AddP cases #3 and #5 (see below).
2927 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2928 assert(offs != Type::OffsetBot ||
2929 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2930 "offset must be a constant or it is initialization of array");
2931 return offs;
2932 }
2933 const TypePtr *t_ptr = adr_type->isa_ptr();
2934 assert(t_ptr != nullptr, "must be a pointer type");
2935 return t_ptr->offset();
2936 }
2937
2938 Node* ConnectionGraph::get_addp_base(Node *addp) {
2939 assert(addp->is_AddP(), "must be AddP");
2940 //
2941 // AddP cases for Base and Address inputs:
2942 // case #1. Direct object's field reference:
2943 // Allocate
2944 // |
2945 // Proj #5 ( oop result )
2946 // |
2947 // CheckCastPP (cast to instance type)
2948 // | |
2949 // AddP ( base == address )
2950 //
2951 // case #2. Indirect object's field reference:
2952 // Phi
2953 // |
2954 // CastPP (cast to instance type)
2955 // | |
3069 }
3070 return nullptr;
3071 }
3072
3073 //
3074 // Adjust the type and inputs of an AddP which computes the
3075 // address of a field of an instance
3076 //
3077 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3078 PhaseGVN* igvn = _igvn;
3079 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3080 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3081 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3082 if (t == nullptr) {
3083    // We are computing a raw address for a store captured by an Initialize;
3084    // compute an appropriate address type (cases #3 and #5).
3085 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3086 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3087 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3088 assert(offs != Type::OffsetBot, "offset must be a constant");
3089 t = base_t->add_offset(offs)->is_oopptr();
3090 }
3091 int inst_id = base_t->instance_id();
3092 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3093 "old type must be non-instance or match new type");
3094
3095  // The type 't' could be a subclass of 'base_t'.
3096  // As a result, t->offset() could be larger than base_t's size, which will
3097  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3098  // constructor verifies the correctness of the offset.
3099  //
3100  // This can happen on a subclass's branch (from type profiling
3101  // inlining) which was not eliminated during parsing since the exactness
3102  // of the allocation type was not propagated to the subclass type check.
3103  //
3104  // Or the type 't' might not be related to 'base_t' at all.
3105  // This can happen when the CHA type differs from the MDO type on a dead path
3106  // (for example, from an instanceof check) which was not collapsed during parsing.
3107  //
3108  // Do nothing for such an AddP node and don't process its users since
3109  // this code branch will go away.
3110  //
3111 if (!t->is_known_instance() &&
3112 !base_t->maybe_java_subtype_of(t)) {
3113 return false; // bail out
3114 }
3115 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3116  // Do NOT remove the next line: it ensures a new alias index is allocated
3117  // for the instance type. Note: C++ will not remove the call since it
3118  // has a side effect.
3119 int alias_idx = _compile->get_alias_index(tinst);
3120 igvn->set_type(addp, tinst);
3121 // record the allocation in the node map
3122 set_map(addp, get_map(base->_idx));
3123 // Set addp's Base and Address to 'base'.
3124 Node *abase = addp->in(AddPNode::Base);
3125 Node *adr = addp->in(AddPNode::Address);
3126 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3127 adr->in(0)->_idx == (uint)inst_id) {
3128 // Skip AddP cases #3 and #5.
3129 } else {
3130 assert(!abase->is_top(), "sanity"); // AddP case #3
3131 if (abase != base) {
3132 igvn->hash_delete(addp);
3133 addp->set_req(AddPNode::Base, base);
3134 if (abase == adr) {
3135 addp->set_req(AddPNode::Address, base);
3789 ptnode_adr(n->_idx)->dump();
3790 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3791 #endif
3792 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3793 return;
3794 } else {
3795 Node *val = get_map(jobj->idx()); // CheckCastPP node
3796 TypeNode *tn = n->as_Type();
3797 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3798 assert(tinst != nullptr && tinst->is_known_instance() &&
3799 tinst->instance_id() == jobj->idx() , "instance type expected.");
3800
3801 const Type *tn_type = igvn->type(tn);
3802 const TypeOopPtr *tn_t;
3803 if (tn_type->isa_narrowoop()) {
3804 tn_t = tn_type->make_ptr()->isa_oopptr();
3805 } else {
3806 tn_t = tn_type->isa_oopptr();
3807 }
3808 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
3809 if (tn_type->isa_narrowoop()) {
3810 tn_type = tinst->make_narrowoop();
3811 } else {
3812 tn_type = tinst;
3813 }
3814 igvn->hash_delete(tn);
3815 igvn->set_type(tn, tn_type);
3816 tn->set_type(tn_type);
3817 igvn->hash_insert(tn);
3818 record_for_optimizer(n);
3819 } else {
3820 assert(tn_type == TypePtr::NULL_PTR ||
3821               (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
3822 "unexpected type");
3823 continue; // Skip dead path with different type
3824 }
3825 }
3826 } else {
3827 debug_only(n->dump();)
3828 assert(false, "EA: unexpected node");
3829 continue;
3830 }
3831 // push allocation's users on appropriate worklist
3832 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3833 Node *use = n->fast_out(i);
3834      if (use->is_Mem() && use->in(MemNode::Address) == n) {
3835 // Load/store to instance's field
3836 memnode_worklist.append_if_missing(use);
3837 } else if (use->is_MemBar()) {
3838 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3839 memnode_worklist.append_if_missing(use);
3840 }
3841 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3842 Node* addp2 = find_second_addp(use, n);
3843 if (addp2 != nullptr) {
3844 alloc_worklist.append_if_missing(addp2);
3845 }
3846 alloc_worklist.append_if_missing(use);
3847 } else if (use->is_Phi() ||
3848 use->is_CheckCastPP() ||
3849 use->is_EncodeNarrowPtr() ||
3850 use->is_DecodeNarrowPtr() ||
3851 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3852 alloc_worklist.append_if_missing(use);
3853 #ifdef ASSERT
3854 } else if (use->is_Mem()) {
3855 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3856 } else if (use->is_MergeMem()) {
3857 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3858 } else if (use->is_SafePoint()) {
3859        // Look for MergeMem nodes for calls which reference a unique allocation
3860 // (through CheckCastPP nodes) even for debug info.
3861 Node* m = use->in(TypeFunc::Memory);
3862 if (m->is_MergeMem()) {
3863 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3864 }
3865 } else if (use->Opcode() == Op_EncodeISOArray) {
3866 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3867 // EncodeISOArray overwrites destination array
3868 memnode_worklist.append_if_missing(use);
3869 }
3870 } else {
3871 uint op = use->Opcode();
3872 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3873 (use->in(MemNode::Memory) == n)) {
3874          // They overwrite the memory edge corresponding to the destination array.
3875 memnode_worklist.append_if_missing(use);
3876 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3877 op == Op_CastP2X || op == Op_StoreCM ||
3878 op == Op_FastLock || op == Op_AryEq ||
3879 op == Op_StrComp || op == Op_CountPositives ||
3880 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3881 op == Op_StrEquals || op == Op_VectorizedHashCode ||
3882 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3883 op == Op_SubTypeCheck ||
3884 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3885 n->dump();
3886 use->dump();
3887 assert(false, "EA: missing allocation reference path");
3888 }
3889 #endif
3890 }
3891 }
3892
3893 }
3894
3895 #ifdef ASSERT
3896 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints.
3897 for (uint i = 0; i < reducible_merges.size(); i++) {
3898 Node* phi = reducible_merges.at(i);
3899 for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
3900 Node* use = phi->fast_out(j);
3901 if (!use->is_SafePoint()) {
3902 phi->dump(-3);
3903 assert(false, "Unexpected user of reducible Phi -> %s", use->Name());
3946 if (memnode_worklist.length() == 0)
3947 return; // nothing to do
3948 while (memnode_worklist.length() != 0) {
3949 Node *n = memnode_worklist.pop();
3950 if (visited.test_set(n->_idx)) {
3951 continue;
3952 }
3953 if (n->is_Phi() || n->is_ClearArray()) {
3954 // we don't need to do anything, but the users must be pushed
3955 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3956 // we don't need to do anything, but the users must be pushed
3957 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3958 if (n == nullptr) {
3959 continue;
3960 }
3961 } else if (n->Opcode() == Op_StrCompressedCopy ||
3962 n->Opcode() == Op_EncodeISOArray) {
3963 // get the memory projection
3964 n = n->find_out_with(Op_SCMemProj);
3965 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3966 } else {
3967 assert(n->is_Mem(), "memory node required.");
3968 Node *addr = n->in(MemNode::Address);
3969 const Type *addr_t = igvn->type(addr);
3970 if (addr_t == Type::TOP) {
3971 continue;
3972 }
3973 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
3974 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3975 assert ((uint)alias_idx < new_index_end, "wrong alias index");
3976 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3977 if (_compile->failing()) {
3978 return;
3979 }
3980 if (mem != n->in(MemNode::Memory)) {
3981        // We delay the memory edge update since we need the old one in
3982        // the MergeMem code below when instance memory slices are separated.
3983 set_map(n, mem);
3984 }
3985 if (n->is_Load()) {
3988 // get the memory projection
3989 n = n->find_out_with(Op_SCMemProj);
3990 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3991 }
3992 }
3993 // push user on appropriate worklist
3994 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3995 Node *use = n->fast_out(i);
3996 if (use->is_Phi() || use->is_ClearArray()) {
3997 memnode_worklist.append_if_missing(use);
3998 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3999 if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4000 continue;
4001 }
4002 memnode_worklist.append_if_missing(use);
4003 } else if (use->is_MemBar()) {
4004 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4005 memnode_worklist.append_if_missing(use);
4006 }
4007 #ifdef ASSERT
4008      } else if (use->is_Mem()) {
4009 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4010 } else if (use->is_MergeMem()) {
4011 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4012 } else if (use->Opcode() == Op_EncodeISOArray) {
4013 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4014 // EncodeISOArray overwrites destination array
4015 memnode_worklist.append_if_missing(use);
4016 }
4017 } else {
4018 uint op = use->Opcode();
4019 if ((use->in(MemNode::Memory) == n) &&
4020 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4021          // They overwrite the memory edge corresponding to the destination array.
4022 memnode_worklist.append_if_missing(use);
4023 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4024 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4025 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4026 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4027 n->dump();
4028 use->dump();
4029 assert(false, "EA: missing memory path");
4030 }
4031 #endif
4032 }
4033 }
4034 }
4035
4036 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4037 // Walk each memory slice moving the first node encountered of each
4038 // instance type to the input corresponding to its alias index.
4039 uint length = mergemem_worklist.length();
4040  for (uint next = 0; next < length; ++next) {
4041 MergeMemNode* nmm = mergemem_worklist.at(next);
4042 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4043 // Note: we don't want to use MergeMemStream here because we only want to
4044 // scan inputs which exist at the start, not ones we add during processing.
4045    // Note 2: MergeMem may already contain instance memory slices added
4046    // during the find_inst_mem() call when memory nodes were processed above.
4093 Node* result = step_through_mergemem(nmm, ni, tinst);
4094 if (result == nmm->base_memory()) {
4095 // Didn't find instance memory, search through general slice recursively.
4096 result = nmm->memory_at(_compile->get_general_index(ni));
4097 result = find_inst_mem(result, ni, orig_phis);
4098 if (_compile->failing()) {
4099 return;
4100 }
4101 nmm->set_memory_at(ni, result);
4102 }
4103 }
4104 igvn->hash_insert(nmm);
4105 record_for_optimizer(nmm);
4106 }
4107
4108 // Phase 4: Update the inputs of non-instance memory Phis and
4109 // the Memory input of memnodes
4110 // First update the inputs of any non-instance Phi's from
4111 // which we split out an instance Phi. Note we don't have
4112 // to recursively process Phi's encountered on the input memory
4113 // chains as is done in split_memory_phi() since they will
4114 // also be processed here.
4115 for (int j = 0; j < orig_phis.length(); j++) {
4116 PhiNode *phi = orig_phis.at(j);
4117 int alias_idx = _compile->get_alias_index(phi->adr_type());
4118 igvn->hash_delete(phi);
4119 for (uint i = 1; i < phi->req(); i++) {
4120 Node *mem = phi->in(i);
4121 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4122 if (_compile->failing()) {
4123 return;
4124 }
4125 if (mem != new_mem) {
4126 phi->set_req(i, new_mem);
4127 }
4128 }
4129 igvn->hash_insert(phi);
4130 record_for_optimizer(phi);
4131 }
4132
4133 // Update the memory inputs of MemNodes with the value we computed
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/c2/barrierSetC2.hpp"
30 #include "libadt/vectset.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/metaspace.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "opto/c2compiler.hpp"
35 #include "opto/arraycopynode.hpp"
36 #include "opto/callnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/compile.hpp"
39 #include "opto/escape.hpp"
40 #include "opto/macro.hpp"
41 #include "opto/phaseX.hpp"
42 #include "opto/movenode.hpp"
43 #include "opto/rootnode.hpp"
44 #include "utilities/macros.hpp"
45
46 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
47 // If ReduceAllocationMerges is enabled we might call split_through_phi during
48 // split_unique_types and that will create additional nodes that need to be
49 // pushed to the ConnectionGraph. The code below bumps the initial capacity of
50 // _nodes by 10% to account for these additional nodes. If capacity is exceeded
51 // the array will be reallocated.
52 _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
145 GrowableArray<SafePointNode*> sfn_worklist;
146 GrowableArray<MergeMemNode*> mergemem_worklist;
147 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
148
149 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
150
151 // 1. Populate Connection Graph (CG) with PointsTo nodes.
152 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
153 // Initialize worklist
154 if (C->root() != nullptr) {
155 ideal_nodes.push(C->root());
156 }
157 // Processed ideal nodes are unique on ideal_nodes list
158 // but several ideal nodes are mapped to the phantom_obj.
159 // To avoid duplicated entries on the following worklists
160 // add the phantom_obj only once to them.
161 ptnodes_worklist.append(phantom_obj);
162 java_objects_worklist.append(phantom_obj);
163 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
164 Node* n = ideal_nodes.at(next);
165 if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
166 !n->in(MemNode::Address)->is_AddP() &&
167 _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
168 // Load/Store at mark work address is at offset 0 so has no AddP which confuses EA
169 Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
170 _igvn->register_new_node_with_optimizer(addp);
171 _igvn->replace_input_of(n, MemNode::Address, addp);
172 ideal_nodes.push(addp);
173 _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
174 }
175 // Create PointsTo nodes and add them to Connection Graph. Called
176 // only once per ideal node since ideal_nodes is Unique_Node list.
177 add_node_to_connection_graph(n, &delayed_worklist);
178 PointsToNode* ptn = ptnode_adr(n->_idx);
179 if (ptn != nullptr && ptn != phantom_obj) {
180 ptnodes_worklist.append(ptn);
181 if (ptn->is_JavaObject()) {
182 java_objects_worklist.append(ptn->as_JavaObject());
183 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
184 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
185 // Only allocations and java static calls results are interesting.
186 non_escaped_allocs_worklist.append(ptn->as_JavaObject());
187 }
188 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
189 oop_fields_worklist.append(ptn->as_Field());
190 }
191 }
192 // Collect some interesting nodes for further use.
193 switch (n->Opcode()) {
194 case Op_MergeMem:
657
658 // The next two inputs are:
659 // (1) A copy of the original pointer to NSR objects.
660 // (2) A selector, used to decide if we need to rematerialize an object
661 // or use the pointer to a NSR object.
662 // See more details of these fields in the declaration of SafePointScalarMergeNode
663 sfpt->add_req(ophi);
664 sfpt->add_req(selector);
665
666 for (uint i = 1; i < ophi->req(); i++) {
667 Node* base = ophi->in(i);
668 JavaObjectNode* ptn = unique_java_object(base);
669
670 // If the base is not scalar replaceable we don't need to register information about
671 // it at this time.
672 if (ptn == nullptr || !ptn->scalar_replaceable()) {
673 continue;
674 }
675
676 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
677 Unique_Node_List value_worklist;
678 SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
679 guarantee(value_worklist.size() == 0, "Unimplemented: Valhalla support for 8287061");
680 if (sobj == nullptr) {
681 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
682 return;
683 }
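// If a scalarized description cannot be built, give up on reducing
// allocation merges for this compilation; it will be retried with
// ReduceAllocationMerges disabled.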
684
685 // Now make a pass over the debug information replacing any references
686 // to the allocated object with "sobj"
687 Node* ccpp = alloc->result_cast();
688 sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
689
690 // Register the scalarized object as a candidate for reallocation
691 smerge->add_req(sobj);
692 }
693
694 // Replaces debug information references to "ophi" in "sfpt" with references to "smerge"
695 sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);
696
697 // The call to 'replace_edges_in_range' above might have removed the
698 // reference to ophi that we need at _merge_pointer_idx. The line below
699 // makes sure the reference is maintained.
823 return false;
824 }
825
826 // Returns true if at least one of the arguments to the call is an object
827 // that does not escape globally.
828 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
829 if (call->method() != nullptr) {
830 uint max_idx = TypeFunc::Parms + call->method()->arg_size();
831 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
832 Node* p = call->in(idx);
833 if (not_global_escape(p)) {
834 return true;
835 }
836 }
837 } else {
838 const char* name = call->as_CallStaticJava()->_name;
839 assert(name != nullptr, "no name");
840 // no arg escapes through uncommon traps
841 if (strcmp(name, "uncommon_trap") != 0) {
842 // process_call_arguments() assumes that all arguments escape globally
843 const TypeTuple* d = call->tf()->domain_sig();
844 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
845 const Type* at = d->field_at(i);
846 if (at->isa_oopptr() != nullptr) {
847 return true;
848 }
849 }
850 }
851 }
852 return false;
853 }
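// Note: without a ciMethod (runtime stub), any oop in the signature of a
// call other than an uncommon trap is conservatively treated above as a
// potential argument escape, matching process_call_arguments().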
854
855
856
857 // Utility function for nodes that load an object
858 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
859 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
860 // ThreadLocal has RawPtr type.
861 const Type* t = _igvn->type(n);
862 if (t->make_ptr() != nullptr) {
863 Node* adr = n->in(MemNode::Address);
897 // first IGVN optimization when escape information is still available.
898 record_for_optimizer(n);
899 } else if (n->is_Allocate()) {
900 add_call_node(n->as_Call());
901 record_for_optimizer(n);
902 } else {
903 if (n->is_CallStaticJava()) {
904 const char* name = n->as_CallStaticJava()->_name;
905 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
906 return; // Skip uncommon traps
907 }
908 }
909 // Don't mark as processed since call's arguments have to be processed.
910 delayed_worklist->push(n);
911 // Check if a call returns an object.
912 if ((n->as_Call()->returns_pointer() &&
913 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
914 (n->is_CallStaticJava() &&
915 n->as_CallStaticJava()->is_boxing_method())) {
916 add_call_node(n->as_Call());
917 } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
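// Calls returning an inline type as multiple fields have one projection
// per returned field; the call only needs a Connection Graph node if at
// least one of those projections is an oop.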
918 bool returns_oop = false;
919 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
920 ProjNode* pn = n->fast_out(i)->as_Proj();
921 if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
922 returns_oop = true;
923 }
924 }
925 if (returns_oop) {
926 add_call_node(n->as_Call());
927 }
928 }
929 }
930 return;
931 }
932 // Put this check here to process call arguments since some call nodes
933 // point to phantom_obj.
934 if (n_ptn == phantom_obj || n_ptn == null_obj) {
935 return; // Skip predefined nodes.
936 }
937 switch (opcode) {
938 case Op_AddP: {
939 Node* base = get_addp_base(n);
940 PointsToNode* ptn_base = ptnode_adr(base->_idx);
941 // Field nodes are created for all field types. They are used in
942 // adjust_scalar_replaceable_state() and split_unique_types().
943 // Note that non-oop fields will have only base edges in the Connection
944 // Graph because such fields are not used for oop loads and stores.
945 int offset = address_offset(n, igvn);
946 add_field(n, PointsToNode::NoEscape, offset);
947 if (ptn_base == nullptr) {
948 delayed_worklist->push(n); // Process it later.
949 } else {
950 n_ptn = ptnode_adr(n_idx);
951 add_base(n_ptn->as_Field(), ptn_base);
952 }
953 break;
954 }
955 case Op_CastX2P: {
956 map_ideal_node(n, phantom_obj);
957 break;
958 }
959 case Op_InlineType:
960 case Op_CastPP:
961 case Op_CheckCastPP:
962 case Op_EncodeP:
963 case Op_DecodeN:
964 case Op_EncodePKlass:
965 case Op_DecodeNKlass: {
966 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
967 break;
968 }
969 case Op_CMoveP: {
970 add_local_var(n, PointsToNode::NoEscape);
971 // Do not add edges during the first iteration because some inputs
972 // may not be defined yet.
973 delayed_worklist->push(n);
974 break;
975 }
976 case Op_ConP:
977 case Op_ConN:
978 case Op_ConNKlass: {
979 // assume all oop constants globally escape except for null
1011 case Op_PartialSubtypeCheck: {
1012 // Produces Null or notNull and is used only in CmpP, so
1013 // phantom_obj can be used.
1014 map_ideal_node(n, phantom_obj); // Result is unknown
1015 break;
1016 }
1017 case Op_Phi: {
1018 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1019 // ThreadLocal has RawPtr type.
1020 const Type* t = n->as_Phi()->type();
1021 if (t->make_ptr() != nullptr) {
1022 add_local_var(n, PointsToNode::NoEscape);
1023 // Do not add edges during the first iteration because some inputs
1024 // may not be defined yet.
1025 delayed_worklist->push(n);
1026 }
1027 break;
1028 }
1029 case Op_Proj: {
1030 // we are only interested in the oop result projection from a call
1031 if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1032 (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1033 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1034 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1035 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1036 }
1037 break;
1038 }
1039 case Op_Rethrow: // Exception object escapes
1040 case Op_Return: {
1041 if (n->req() > TypeFunc::Parms &&
1042 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1043 // Treat Return value as LocalVar with GlobalEscape escape state.
1044 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1045 }
1046 break;
1047 }
1048 case Op_CompareAndExchangeP:
1049 case Op_CompareAndExchangeN:
1050 case Op_GetAndSetP:
1051 case Op_GetAndSetN: {
1052 add_objload_to_connection_graph(n, delayed_worklist);
1053 // fall-through
1054 }
1116 if (n->is_Call()) {
1117 process_call_arguments(n->as_Call());
1118 return;
1119 }
1120 assert(n->is_Store() || n->is_LoadStore() ||
1121 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1122 "node should be registered already");
1123 int opcode = n->Opcode();
1124 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1125 if (gc_handled) {
1126 return; // Ignore node if already handled by GC.
1127 }
1128 switch (opcode) {
1129 case Op_AddP: {
1130 Node* base = get_addp_base(n);
1131 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1132 assert(ptn_base != nullptr, "field's base should be registered");
1133 add_base(n_ptn->as_Field(), ptn_base);
1134 break;
1135 }
1136 case Op_InlineType:
1137 case Op_CastPP:
1138 case Op_CheckCastPP:
1139 case Op_EncodeP:
1140 case Op_DecodeN:
1141 case Op_EncodePKlass:
1142 case Op_DecodeNKlass: {
1143 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1144 break;
1145 }
1146 case Op_CMoveP: {
1147 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1148 Node* in = n->in(i);
1149 if (in == nullptr) {
1150 continue; // ignore null
1151 }
1152 Node* uncast_in = in->uncast();
1153 if (uncast_in->is_top() || uncast_in == n) {
1154 continue; // ignore top or inputs which loop back to this node
1155 }
1156 PointsToNode* ptn = ptnode_adr(in->_idx);
1171 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1172 // ThreadLocal has RawPtr type.
1173 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1174 for (uint i = 1; i < n->req(); i++) {
1175 Node* in = n->in(i);
1176 if (in == nullptr) {
1177 continue; // ignore null
1178 }
1179 Node* uncast_in = in->uncast();
1180 if (uncast_in->is_top() || uncast_in == n) {
1181 continue; // ignore top or inputs which loop back to this node
1182 }
1183 PointsToNode* ptn = ptnode_adr(in->_idx);
1184 assert(ptn != nullptr, "node should be registered");
1185 add_edge(n_ptn, ptn);
1186 }
1187 break;
1188 }
1189 case Op_Proj: {
1190 // we are only interested in the oop result projection from a call
1191 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1192 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1193 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1194 break;
1195 }
1196 case Op_Rethrow: // Exception object escapes
1197 case Op_Return: {
1198 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1199 "Unexpected node type");
1200 // Treat Return value as LocalVar with GlobalEscape escape state.
1201 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1202 break;
1203 }
1204 case Op_CompareAndExchangeP:
1205 case Op_CompareAndExchangeN:
1206 case Op_GetAndSetP:
1207 case Op_GetAndSetN: {
1208 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1209 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1210 // fall-through
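// These nodes both load and store an oop: the loaded value was handled
// above, and execution falls through so the CompareAndSwap cases below
// also process the value being stored.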
1211 }
1212 case Op_CompareAndSwapP:
1348 PointsToNode* ptn = ptnode_adr(val->_idx);
1349 assert(ptn != nullptr, "node should be registered");
1350 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1351 // Add edge to object for unsafe access with offset.
1352 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1353 assert(adr_ptn != nullptr, "node should be registered");
1354 if (adr_ptn->is_Field()) {
1355 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1356 add_edge(adr_ptn, ptn);
1357 }
1358 return true;
1359 }
1360 #ifdef ASSERT
1361 n->dump(1);
1362 assert(false, "not unsafe");
1363 #endif
1364 return false;
1365 }
1366
1367 void ConnectionGraph::add_call_node(CallNode* call) {
1368 assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
1369 uint call_idx = call->_idx;
1370 if (call->is_Allocate()) {
1371 Node* k = call->in(AllocateNode::KlassNode);
1372 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1373 assert(kt != nullptr, "TypeKlassPtr required.");
1374 PointsToNode::EscapeState es = PointsToNode::NoEscape;
1375 bool scalar_replaceable = true;
1376 NOT_PRODUCT(const char* nsr_reason = "");
1377 if (call->is_AllocateArray()) {
1378 if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1379 es = PointsToNode::GlobalEscape;
1380 } else {
1381 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1382 if (length < 0) {
1383 // Not scalar replaceable if the length is not constant.
1384 scalar_replaceable = false;
1385 NOT_PRODUCT(nsr_reason = "has a non-constant length");
1386 } else if (length > EliminateAllocationArraySizeLimit) {
1387 // Not scalar replaceable if the length is too big.
1388 scalar_replaceable = false;
1424 //
1425 // - all oop arguments escape globally;
1426 //
1427 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1428 //
1429 // - the same as CallDynamicJavaNode if can't do bytecode analysis;
1430 //
1431 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
1432 // - mapped to NoEscape JavaObject node if non-escaping object allocated
1433 // during call is returned;
1434 // - mapped to ArgEscape LocalVar node pointing to object arguments
1435 //   which are returned and do not escape during the call;
1436 //
1437 // - the escaping status of oop arguments is defined by bytecode analysis;
1438 //
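// For example, for a hypothetical method
//   static Object identity(Object o) { return o; }
// the call would be mapped to an ArgEscape LocalVar pointing to the
// argument, assuming bytecode analysis shows 'o' is returned and does
// not escape during the call.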
1439 // For a static call, we know exactly what method is being called.
1440 // Use bytecode estimator to record whether the call's return value escapes.
1441 ciMethod* meth = call->as_CallJava()->method();
1442 if (meth == nullptr) {
1443 const char* name = call->as_CallStaticJava()->_name;
1444 assert(strncmp(name, "_multianewarray", 15) == 0 ||
1445 strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
1446 // Returns a newly allocated non-escaped object.
1447 add_java_object(call, PointsToNode::NoEscape);
1448 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
1449 } else if (meth->is_boxing_method()) {
1450 // Returns boxing object
1451 PointsToNode::EscapeState es;
1452 vmIntrinsics::ID intr = meth->intrinsic_id();
1453 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1454 // The result does not escape because Float/Double boxing always allocates a fresh object (no boxing cache).
1455 es = PointsToNode::NoEscape;
1456 } else {
1457 // The result escapes globally because the boxed object may be loaded from the boxing cache (e.g. Integer.valueOf()).
1458 es = PointsToNode::GlobalEscape;
1459 }
1460 add_java_object(call, es);
1461 if (es == PointsToNode::GlobalEscape) {
1462 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
1463 }
1464 } else {
1465 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1466 call_analyzer->copy_dependencies(_compile->dependencies());
1467 if (call_analyzer->is_return_allocated()) {
1468 // Returns a newly allocated non-escaped object, simply
1469 // update dependency information.
1470 // Mark it as NoEscape so that objects referenced by
1471 // its fields will be marked as NoEscape at least.
1472 add_java_object(call, PointsToNode::NoEscape);
1473 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1474 } else {
1475 // Determine whether any arguments are returned.
1476 const TypeTuple* d = call->tf()->domain_cc();
1477 bool ret_arg = false;
1478 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1479 if (d->field_at(i)->isa_ptr() != nullptr &&
1480 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1481 ret_arg = true;
1482 break;
1483 }
1484 }
1485 if (ret_arg) {
1486 add_local_var(call, PointsToNode::ArgEscape);
1487 } else {
1488 // Returns unknown object.
1489 map_ideal_node(call, phantom_obj);
1490 }
1491 }
1492 }
1493 } else {
1494 // Another type of call; assume the worst case:
1495 // the returned value is unknown and escapes globally.
1496 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
1504 #ifdef ASSERT
1505 case Op_Allocate:
1506 case Op_AllocateArray:
1507 case Op_Lock:
1508 case Op_Unlock:
1509 assert(false, "should be done already");
1510 break;
1511 #endif
1512 case Op_ArrayCopy:
1513 case Op_CallLeafNoFP:
1514 // Most array copies are ArrayCopy nodes at this point but there
1515 // are still a few direct calls to the copy subroutines (See
1516 // PhaseStringOpts::copy_string())
1517 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1518 call->as_CallLeaf()->is_call_to_arraycopystub();
1519 // fall through
1520 case Op_CallLeafVector:
1521 case Op_CallLeaf: {
1522 // Stub calls: objects do not escape but they are not scalar replaceable.
1523 // Adjust the escape state for outgoing arguments.
1524 const TypeTuple* d = call->tf()->domain_sig();
1525 bool src_has_oops = false;
1526 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1527 const Type* at = d->field_at(i);
1528 Node *arg = call->in(i);
1529 if (arg == nullptr) {
1530 continue;
1531 }
1532 const Type *aat = _igvn->type(arg);
1533 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1534 continue;
1535 }
1536 if (arg->is_AddP()) {
1537 //
1538 // The inline_native_clone() case, when the arraycopy stub is called
1539 // after the allocation but before the Initialize and CheckCastPP nodes.
1540 // Or the normal arraycopy case for object arrays.
1541 //
1542 // Set AddP's base (Allocate) as not scalar replaceable since
1543 // pointer to the base (with offset) is passed as argument.
1544 //
1545 arg = get_addp_base(arg);
1546 }
1547 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1548 assert(arg_ptn != nullptr, "should be registered");
1549 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1550 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1551 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1552 aat->isa_ptr() != nullptr, "expecting a Ptr");
1553 bool arg_has_oops = aat->isa_oopptr() &&
1554 (aat->isa_instptr() ||
1555 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
1556 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
1557 aat->isa_aryptr()->is_flat() &&
1558 aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
1559 if (i == TypeFunc::Parms) {
1560 src_has_oops = arg_has_oops;
1561 }
1562 //
1563 // src or dst could be j.l.Object when the other is a basic type array:
1564 //
1565 // arraycopy(char[],0,Object*,0,size);
1566 // arraycopy(Object*,0,char[],0,size);
1567 //
1568 // Don't add edges in such cases.
1569 //
1570 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1571 arg_has_oops && (i > TypeFunc::Parms);
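// In debug builds, verify that this leaf call is one of the stubs known
// to be handled here; an unexpected CallLeaf fails loudly so explicit EA
// support can be added rather than arguments being silently mishandled.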
1572 #ifdef ASSERT
1573 if (!(is_arraycopy ||
1574 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1575 (call->as_CallLeaf()->_name != nullptr &&
1576 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1577 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1578 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1587 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1588 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1589 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1590 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1591 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1592 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1593 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1594 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1595 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1596 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1597 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1598 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1599 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1600 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1601 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1602 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1603 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1604 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1605 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1606 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1607 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1608 strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
1609 strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
1610 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1611 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1613 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1614 ))) {
1615 call->dump();
1616 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1617 }
1618 #endif
1619 // Always process an arraycopy's destination object since
1620 // we need to add all possible edges to the references
1621 // in the source object.
1622 if (arg_esc >= PointsToNode::ArgEscape &&
1623 !arg_is_arraycopy_dest) {
1624 continue;
1625 }
1626 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1627 if (call->is_ArrayCopy()) {
1628 ArrayCopyNode* ac = call->as_ArrayCopy();
1629 if (ac->is_clonebasic() ||
1652 }
1653 }
1654 }
1655 break;
1656 }
1657 case Op_CallStaticJava: {
1658 // For a static call, we know exactly what method is being called.
1659 // Use bytecode estimator to record the call's escape effects
1660 #ifdef ASSERT
1661 const char* name = call->as_CallStaticJava()->_name;
1662 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1663 #endif
1664 ciMethod* meth = call->as_CallJava()->method();
1665 if ((meth != nullptr) && meth->is_boxing_method()) {
1666 break; // Boxing methods do not modify any oops.
1667 }
1668 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1669 // Fall through to the default case if this is not a Java method or there is no analyzer information.
1670 if (call_analyzer != nullptr) {
1671 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1672 const TypeTuple* d = call->tf()->domain_cc();
1673 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1674 const Type* at = d->field_at(i);
1675 int k = i - TypeFunc::Parms;
1676 Node* arg = call->in(i);
1677 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1678 if (at->isa_ptr() != nullptr &&
1679 call_analyzer->is_arg_returned(k)) {
1680 // The call returns arguments.
1681 if (call_ptn != nullptr) { // Is call's result used?
1682 assert(call_ptn->is_LocalVar(), "node should be registered");
1683 assert(arg_ptn != nullptr, "node should be registered");
1684 add_edge(call_ptn, arg_ptn);
1685 }
1686 }
1687 if (at->isa_oopptr() != nullptr &&
1688 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1689 if (!call_analyzer->is_arg_stack(k)) {
1690 // The argument global escapes
1691 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1692 } else {
1696 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1697 }
1698 }
1699 }
1700 }
1701 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1702 // The call returns arguments.
1703 assert(call_ptn->edge_count() > 0, "sanity");
1704 if (!call_analyzer->is_return_local()) {
1705 // The call may also return an unknown object.
1706 add_edge(call_ptn, phantom_obj);
1707 }
1708 }
1709 break;
1710 }
1711 }
1712 default: {
1713 // Fall-through here if not a Java method, if there is no analyzer
1714 // information, or for some other type of call; assume the worst case:
1715 // all arguments escape globally.
1716 const TypeTuple* d = call->tf()->domain_cc();
1717 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1718 const Type* at = d->field_at(i);
1719 if (at->isa_oopptr() != nullptr) {
1720 Node* arg = call->in(i);
1721 if (arg->is_AddP()) {
1722 arg = get_addp_base(arg);
1723 }
1724 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1725 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1726 }
1727 }
1728 }
1729 }
1730 }
1731
1732
1733 // Finish Graph construction.
1734 bool ConnectionGraph::complete_connection_graph(
1735 GrowableArray<PointsToNode*>& ptnodes_worklist,
1736 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2109 PointsToNode* base = i.get();
2110 if (base->is_JavaObject()) {
2111 // Skip Allocate's fields which will be processed later.
2112 if (base->ideal_node()->is_Allocate()) {
2113 return 0;
2114 }
2115 assert(base == null_obj, "only null ptr base expected here");
2116 }
2117 }
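// None of the field's bases is an allocation whose stored values are
// tracked, so conservatively make the field point to phantom_obj (the
// unknown object).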
2118 if (add_edge(field, phantom_obj)) {
2119 // New edge was added
2120 new_edges++;
2121 add_field_uses_to_worklist(field);
2122 }
2123 return new_edges;
2124 }
2125
2126 // Find the initializing values for the fields of allocations.
2127 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2128 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2129 PointsToNode* init_val = phantom_obj;
2130 Node* alloc = pta->ideal_node();
2131
2132 // Do nothing for Allocate nodes since their field values are
2133 // "known" unless they are initialized by arraycopy/clone.
2134 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2135 if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2136 // Non-flat inline type arrays are initialized with
2137 // the default value instead of null. Handle them here.
2138 init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
2139 assert(init_val != nullptr, "default value should be registered");
2140 } else {
2141 return 0;
2142 }
2143 }
2144 // A non-escaped allocation returned from a Java or runtime call has unknown values in its fields.
2145 assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2146 #ifdef ASSERT
2147 if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2148 const char* name = alloc->as_CallStaticJava()->_name;
2149 assert(strncmp(name, "_multianewarray", 15) == 0 ||
2150 strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
2151 }
2152 #endif
2154 int new_edges = 0;
2155 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2156 PointsToNode* field = i.get();
2157 if (field->is_Field() && field->as_Field()->is_oop()) {
2158 if (add_edge(field, init_val)) {
2159 // New edge was added
2160 new_edges++;
2161 add_field_uses_to_worklist(field->as_Field());
2162 }
2163 }
2164 }
2165 return new_edges;
2166 }
2167
2168 // Find the initializing values for the fields of allocations.
2169 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2170 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2171 Node* alloc = pta->ideal_node();
2172 // Do nothing for Call nodes since their field values are unknown.
2173 if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2174 return 0;
2175 }
2176 InitializeNode* ini = alloc->as_Allocate()->initialization();
2177 bool visited_bottom_offset = false;
2178 GrowableArray<int> offsets_worklist;
2179 int new_edges = 0;
2180
2181 // Check if an oop field's initializing value is recorded and add
2182 // a corresponding null value if it is not recorded. The
2183 // Connection Graph does not record a default initialization by null
2184 // captured by an Initialize node.
2185 //
2186 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2187 PointsToNode* field = i.get(); // Field (AddP)
2188 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2189 continue; // Not oop field
2190 }
2191 int offset = field->as_Field()->offset();
2192 if (offset == Type::OffsetBot) {
2193 if (!visited_bottom_offset) {
2239 } else {
2240 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2241 tty->print_cr("----------init store has invalid value -----");
2242 store->dump();
2243 val->dump();
2244 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2245 }
2246 for (EdgeIterator j(val); j.has_next(); j.next()) {
2247 PointsToNode* obj = j.get();
2248 if (obj->is_JavaObject()) {
2249 if (!field->points_to(obj->as_JavaObject())) {
2250 missed_obj = obj;
2251 break;
2252 }
2253 }
2254 }
2255 }
2256 if (missed_obj != nullptr) {
2257 tty->print_cr("----------field---------------------------------");
2258 field->dump();
2259 tty->print_cr("----------missed reference to object------------");
2260 missed_obj->dump();
2261 tty->print_cr("----------object referenced by init store-------");
2262 store->dump();
2263 val->dump();
2264 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2265 }
2266 }
2267 #endif
2268 } else {
2269 // There could be initializing stores which follow the allocation.
2270 // For example, a volatile field store is not collected
2271 // by the Initialize node.
2272 //
2273 // We would need to check for dependent loads to separate such stores
2274 // from stores which follow loads. For now, add the initial value null
2275 // so that the pointer-comparison optimization works correctly.
2276 }
2277 }
2278 if (value == nullptr) {
2279 // A field's initializing value was not recorded. Add null.
2280 if (add_edge(field, null_obj)) {
2281 // New edge was added
2543 assert(field->edge_count() > 0, "sanity");
2544 }
2545 }
2546 }
2547 }
2548 #endif
2549
2550 // Optimize ideal graph.
2551 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2552 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2553 Compile* C = _compile;
2554 PhaseIterGVN* igvn = _igvn;
2555 if (EliminateLocks) {
2556 // Mark locks before changing ideal graph.
2557 int cnt = C->macro_count();
2558 for (int i = 0; i < cnt; i++) {
2559 Node *n = C->macro_node(i);
2560 if (n->is_AbstractLock()) { // Lock and Unlock nodes
2561 AbstractLockNode* alock = n->as_AbstractLock();
2562 if (!alock->is_non_esc_obj()) {
2563 const Type* obj_type = igvn->type(alock->obj_node());
2564 if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) {
2565 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2566 // The lock could already be marked eliminated by the lock coarsening
2567 // code during the first IGVN pass, before EA. Replace the coarsened
2568 // flag so that all associated locks/unlocks are eliminated.
2569 #ifdef ASSERT
2570 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2571 #endif
2572 alock->set_non_esc_obj();
2573 }
2574 }
2575 }
2576 }
2577 }
2578
2579 if (OptimizePtrCompare) {
2580 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2581 Node *n = ptr_cmp_worklist.at(i);
2582 const TypeInt* tcmp = optimize_ptr_compare(n);
2583 if (tcmp->singleton()) {
2584 Node* cmp = igvn->makecon(tcmp);
2585 #ifndef PRODUCT
2586 if (PrintOptimizePtrCompare) {
2587 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2588 if (Verbose) {
2589 n->dump(1);
2590 }
2591 }
2592 #endif
2593 igvn->replace_node(n, cmp);
2594 }
2595 }
2596 }
2597
2598 // For MemBarStoreStore nodes added in library_call.cpp, check the
2599 // escape status of the associated AllocateNode and optimize out the
2600 // MemBarStoreStore node if the allocated object never escapes.
2601 for (int i = 0; i < storestore_worklist.length(); i++) {
2602 Node* storestore = storestore_worklist.at(i);
2603 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2604 if (alloc->is_Allocate() && not_global_escape(alloc)) {
2605 if (alloc->in(AllocateNode::InlineType) != nullptr) {
2606 // Non-escaping inline type buffer allocations don't require a membar
2607 storestore->as_MemBar()->remove(_igvn);
2608 } else {
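// The object does not escape, so no other thread can observe its
// initializing stores; the publishing StoreStore barrier can therefore
// be downgraded to a CPU-order membar that only constrains the compiler.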
2609 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2610 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
2611 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2612 igvn->register_new_node_with_optimizer(mb);
2613 igvn->replace_node(storestore, mb);
2614 }
2615 }
2616 }
2617 }
2618
2619 // Optimize object pointer comparisons (CmpP/CmpN).
2620 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2621 assert(OptimizePtrCompare, "sanity");
2622 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2623 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2624 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2625 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]
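// The caller folds the compare only when this returns a singleton type:
// EQ when the inputs are provably equal, NE when they are provably
// different; otherwise UNKNOWN leaves the compare in the graph.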
2626
2627 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2628 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2629 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2630 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2631 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2632 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2633
2634 // Check simple cases first.
2748 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2749 assert((src != null_obj) && (dst != null_obj), "not for ConP null");
2750 PointsToNode* ptadr = _nodes.at(n->_idx);
2751 if (ptadr != nullptr) {
2752 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2753 return;
2754 }
2755 Compile* C = _compile;
2756 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2757 map_ideal_node(n, ptadr);
2758 // Add edge from arraycopy node to source object.
2759 (void)add_edge(ptadr, src);
2760 src->set_arraycopy_src();
2761 // Add edge from destination object to arraycopy node.
2762 (void)add_edge(dst, ptadr);
2763 dst->set_arraycopy_dst();
2764 }
2765
2766 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2767 const Type* adr_type = n->as_AddP()->bottom_type();
2768 int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2769 BasicType bt = T_INT;
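// Start with a conservative non-oop placeholder type; bt is refined
// below (field layout type, array element type, or T_OBJECT for
// recognized unsafe oop accesses).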
2770 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2771 // Check only oop fields.
2772 if (!adr_type->isa_aryptr() ||
2773 adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2774 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
2775 // OffsetBot is used to reference an array's element. Ignore the first AddP.
2776 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
2777 bt = T_OBJECT;
2778 }
2779 }
2780 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2781 if (adr_type->isa_instptr()) {
2782 ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2783 if (field != nullptr) {
2784 bt = field->layout_type();
2785 } else {
2786 // Check for unsafe oop field access
2787 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2788 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2789 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2790 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2791 bt = T_OBJECT;
2792 (*unsafe) = true;
2793 }
2794 }
2795 } else if (adr_type->isa_aryptr()) {
2796 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2797 // Ignore array length load.
2798 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
2799 // Ignore first AddP.
2800 } else {
2801 const Type* elemtype = adr_type->is_aryptr()->elem();
2802 if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
2803 ciInlineKlass* vk = elemtype->inline_klass();
2804 field_offset += vk->first_field_offset();
2805 bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2806 } else {
2807 bt = elemtype->array_element_basic_type();
2808 }
2809 }
2810 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2811 // Allocation initialization, ThreadLocal field access, unsafe access
2812 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2813 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2814 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2815 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2816 bt = T_OBJECT;
2817 }
2818 }
2819 }
2820 // Note: T_NARROWOOP is not classed as a real reference type
2821 return (is_reference_type(bt) || bt == T_NARROWOOP);
2822 }
2823
2824 // Returns the unique JavaObject node pointed to by 'n', or null.
2825 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
2826 // If the node was created after the escape computation we can't answer.
2827 uint idx = n->_idx;
2828 if (idx >= nodes_size()) {
2971 return true;
2972 }
2973 }
2974 }
2975 }
2976 }
2977 return false;
2978 }
2979
2980 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
2981 const Type *adr_type = phase->type(adr);
2982 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
2983 // We are computing a raw address for a store captured by an Initialize;
2984 // compute an appropriate address type. AddP cases #3 and #5 (see below).
2985 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2986 assert(offs != Type::OffsetBot ||
2987 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2988 "offset must be a constant or it is initialization of array");
2989 return offs;
2990 }
2991 return adr_type->is_ptr()->flat_offset();
2992 }
2993
2994 Node* ConnectionGraph::get_addp_base(Node *addp) {
2995 assert(addp->is_AddP(), "must be AddP");
2996 //
2997 // AddP cases for Base and Address inputs:
2998 // case #1. Direct object's field reference:
2999 // Allocate
3000 // |
3001 // Proj #5 ( oop result )
3002 // |
3003 // CheckCastPP (cast to instance type)
3004 // | |
3005 // AddP ( base == address )
3006 //
3007 // case #2. Indirect object's field reference:
3008 // Phi
3009 // |
3010 // CastPP (cast to instance type)
3011 // | |
3125 }
3126 return nullptr;
3127 }
3128
3129 //
3130 // Adjust the type and inputs of an AddP which computes the
3131 // address of a field of an instance
3132 //
3133 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3134 PhaseGVN* igvn = _igvn;
3135 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3136 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3137 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3138 if (t == nullptr) {
3139 // We are computing a raw address for a store captured by an Initialize;
3140 // compute an appropriate address type (cases #3 and #5).
3141 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3142 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3143 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3144 assert(offs != Type::OffsetBot, "offset must be a constant");
3145 if (base_t->isa_aryptr() != nullptr) {
3146 // In the case of a flat inline type array, each field has its
3147 // own slice so we need to extract the field being accessed from
3148 // the address computation
3149 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3150 } else {
3151 t = base_t->add_offset(offs)->is_oopptr();
3152 }
3153 }
3154 int inst_id = base_t->instance_id();
3155 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3156 "old type must be non-instance or match new type");
3157
3158 // The type 't' could be a subclass of 'base_t'.
3159 // As a result t->offset() could be larger than base_t's size, which would
3160 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3161 // constructor verifies correctness of the offset.
3162 //
3163 // This can happen on a subclass's branch (from type profiling
3164 // inlining) which was not eliminated during parsing since the exactness
3165 // of the allocation type was not propagated to the subclass type check.
3166 //
3167 // Or the type 't' might not be related to 'base_t' at all.
3168 // That can happen when the CHA type differs from the MDO type on a dead
3169 // path (for example, from an instanceof check) which is not collapsed during parsing.
3170 //
3171 // Do nothing for such an AddP node and don't process its users since
3172 // this code branch will go away.
3173 //
3174 if (!t->is_known_instance() &&
3175 !base_t->maybe_java_subtype_of(t)) {
3176 return false; // bail out
3177 }
3178 const TypePtr* tinst = base_t->add_offset(t->offset());
3179 if (tinst->isa_aryptr() && t->isa_aryptr()) {
3180 // In the case of a flat inline type array, each field has its
3181 // own slice so we need to keep track of the field being accessed.
3182 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3183 // Keep array properties (not flat/null-free)
3184 tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3185 if (tinst == nullptr) {
3186 return false; // Skip dead path with inconsistent properties
3187 }
3188 }
3189
3190 // Do NOT remove the next line: ensure a new alias index is allocated
3191 // for the instance type. Note: C++ will not remove it since the call
3192 // has side effects.
3193 int alias_idx = _compile->get_alias_index(tinst);
3194 igvn->set_type(addp, tinst);
3195 // record the allocation in the node map
3196 set_map(addp, get_map(base->_idx));
3197 // Set addp's Base and Address to 'base'.
3198 Node *abase = addp->in(AddPNode::Base);
3199 Node *adr = addp->in(AddPNode::Address);
3200 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3201 adr->in(0)->_idx == (uint)inst_id) {
3202 // Skip AddP cases #3 and #5.
3203 } else {
3204 assert(!abase->is_top(), "sanity"); // AddP case #3
3205 if (abase != base) {
3206 igvn->hash_delete(addp);
3207 addp->set_req(AddPNode::Base, base);
3208 if (abase == adr) {
3209 addp->set_req(AddPNode::Address, base);
3863 ptnode_adr(n->_idx)->dump();
3864 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3865 #endif
3866 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3867 return;
3868 } else {
3869 Node *val = get_map(jobj->idx()); // CheckCastPP node
3870 TypeNode *tn = n->as_Type();
3871 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3872 assert(tinst != nullptr && tinst->is_known_instance() &&
3873 tinst->instance_id() == jobj->idx(), "instance type expected.");
3874
3875 const Type *tn_type = igvn->type(tn);
3876 const TypeOopPtr *tn_t;
3877 if (tn_type->isa_narrowoop()) {
3878 tn_t = tn_type->make_ptr()->isa_oopptr();
3879 } else {
3880 tn_t = tn_type->isa_oopptr();
3881 }
3882 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
3883 if (tn_t->isa_aryptr()) {
3884 // Keep array properties (not flat/null-free)
3885 tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
3886 if (tinst == nullptr) {
3887 continue; // Skip dead path with inconsistent properties
3888 }
3889 }
3890 if (tn_type->isa_narrowoop()) {
3891 tn_type = tinst->make_narrowoop();
3892 } else {
3893 tn_type = tinst;
3894 }
3895 igvn->hash_delete(tn);
3896 igvn->set_type(tn, tn_type);
3897 tn->set_type(tn_type);
3898 igvn->hash_insert(tn);
3899 record_for_optimizer(n);
3900 } else {
3901 assert(tn_type == TypePtr::NULL_PTR ||
3902 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
3903 "unexpected type");
3904 continue; // Skip dead path with different type
3905 }
3906 }
3907 } else {
3908 debug_only(n->dump();)
3909 assert(false, "EA: unexpected node");
3910 continue;
3911 }
3912 // push allocation's users on appropriate worklist
3913 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3914 Node *use = n->fast_out(i);
3915 if (use->is_Mem() && use->in(MemNode::Address) == n) {
3916 // Load/store to instance's field
3917 memnode_worklist.append_if_missing(use);
3918 } else if (use->is_MemBar()) {
3919 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3920 memnode_worklist.append_if_missing(use);
3921 }
3922 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3923 Node* addp2 = find_second_addp(use, n);
3924 if (addp2 != nullptr) {
3925 alloc_worklist.append_if_missing(addp2);
3926 }
3927 alloc_worklist.append_if_missing(use);
3928 } else if (use->is_Phi() ||
3929 use->is_CheckCastPP() ||
3930 use->is_EncodeNarrowPtr() ||
3931 use->is_DecodeNarrowPtr() ||
3932 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3933 alloc_worklist.append_if_missing(use);
3934 #ifdef ASSERT
3935 } else if (use->is_Mem()) {
3936 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3937 } else if (use->is_MergeMem()) {
3938 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3939 } else if (use->is_SafePoint()) {
3940 // Look for MergeMem nodes for calls which reference unique allocation
3941 // (through CheckCastPP nodes) even for debug info.
3942 Node* m = use->in(TypeFunc::Memory);
3943 if (m->is_MergeMem()) {
3944 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3945 }
3946 } else if (use->Opcode() == Op_EncodeISOArray) {
3947 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3948 // EncodeISOArray overwrites destination array
3949 memnode_worklist.append_if_missing(use);
3950 }
3951 } else if (use->Opcode() == Op_Return) {
3952 // Allocation is referenced by field of returned inline type
3953 assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
3954 } else {
3955 uint op = use->Opcode();
3956 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3957 (use->in(MemNode::Memory) == n)) {
3958 // They overwrite the memory edge corresponding to the destination array.
3959 memnode_worklist.append_if_missing(use);
3960 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3961 op == Op_CastP2X || op == Op_StoreCM ||
3962 op == Op_FastLock || op == Op_AryEq ||
3963 op == Op_StrComp || op == Op_CountPositives ||
3964 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3965 op == Op_StrEquals || op == Op_VectorizedHashCode ||
3966 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3967 op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
3968 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3969 n->dump();
3970 use->dump();
3971 assert(false, "EA: missing allocation reference path");
3972 }
3973 #endif
3974 }
3975 }
3976
3977 }
3978
3979 #ifdef ASSERT
3980 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints.
3981 for (uint i = 0; i < reducible_merges.size(); i++) {
3982 Node* phi = reducible_merges.at(i);
3983 for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
3984 Node* use = phi->fast_out(j);
3985 if (!use->is_SafePoint()) {
3986 phi->dump(-3);
3987 assert(false, "Unexpected user of reducible Phi -> %s", use->Name());
4030 if (memnode_worklist.length() == 0)
4031 return; // nothing to do
4032 while (memnode_worklist.length() != 0) {
4033 Node *n = memnode_worklist.pop();
4034 if (visited.test_set(n->_idx)) {
4035 continue;
4036 }
4037 if (n->is_Phi() || n->is_ClearArray()) {
4038 // we don't need to do anything, but the users must be pushed
4039 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4040 // we don't need to do anything, but the users must be pushed
4041 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4042 if (n == nullptr) {
4043 continue;
4044 }
4045 } else if (n->Opcode() == Op_StrCompressedCopy ||
4046 n->Opcode() == Op_EncodeISOArray) {
4047 // get the memory projection
4048 n = n->find_out_with(Op_SCMemProj);
4049 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4050 } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4051 strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4052 n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4053 } else {
4054 assert(n->is_Mem(), "memory node required.");
4055 Node *addr = n->in(MemNode::Address);
4056 const Type *addr_t = igvn->type(addr);
4057 if (addr_t == Type::TOP) {
4058 continue;
4059 }
4060 assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
4061 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4062 assert((uint)alias_idx < new_index_end, "wrong alias index");
4063 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4064 if (_compile->failing()) {
4065 return;
4066 }
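// find_inst_mem() walked up the memory chain, creating new memory Phis
// as needed (recorded in orig_phis), to produce the memory state for
// this instance's alias slice.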
4067 if (mem != n->in(MemNode::Memory)) {
4068 // We delay the memory edge update since we need the old one in the
4069 // MergeMem code below when instance memory slices are separated.
4070 set_map(n, mem);
4071 }
4072 if (n->is_Load()) {
4075 // get the memory projection
4076 n = n->find_out_with(Op_SCMemProj);
4077 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4078 }
4079 }
4080 // push user on appropriate worklist
4081 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4082 Node *use = n->fast_out(i);
4083 if (use->is_Phi() || use->is_ClearArray()) {
4084 memnode_worklist.append_if_missing(use);
4085 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4086 if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4087 continue;
4088 }
4089 memnode_worklist.append_if_missing(use);
4090 } else if (use->is_MemBar()) {
4091 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4092 memnode_worklist.append_if_missing(use);
4093 }
4094 #ifdef ASSERT
4095 } else if (use->is_Mem()) {
4096 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4097 } else if (use->is_MergeMem()) {
4098 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4099 } else if (use->Opcode() == Op_EncodeISOArray) {
4100 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4101 // EncodeISOArray overwrites destination array
4102 memnode_worklist.append_if_missing(use);
4103 }
4104 } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4105 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4106 // store_unknown_inline overwrites destination array
4107 memnode_worklist.append_if_missing(use);
4108 } else {
4109 uint op = use->Opcode();
4110 if ((use->in(MemNode::Memory) == n) &&
4111 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4112 // They overwrite the memory edge corresponding to the destination array.
4113 memnode_worklist.append_if_missing(use);
4114 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4115 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4116 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4117 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4118 n->dump();
4119 use->dump();
4120 assert(false, "EA: missing memory path");
4121 }
4122 #endif
4123 }
4124 }
4125 }
4126
4127 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4128 // Walk each memory slice moving the first node encountered of each
4129 // instance type to the input corresponding to its alias index.
4130 uint length = mergemem_worklist.length();
4131 for (uint next = 0; next < length; ++next) {
4132 MergeMemNode* nmm = mergemem_worklist.at(next);
4133 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4134 // Note: we don't want to use MergeMemStream here because we only want to
4135 // scan inputs which exist at the start, not ones we add during processing.
4136 // Note 2: MergeMem may already contain instance memory slices added
4137 // during the find_inst_mem() call when memory nodes were processed above.
4184 Node* result = step_through_mergemem(nmm, ni, tinst);
4185 if (result == nmm->base_memory()) {
4186 // Didn't find instance memory, search through general slice recursively.
4187 result = nmm->memory_at(_compile->get_general_index(ni));
4188 result = find_inst_mem(result, ni, orig_phis);
4189 if (_compile->failing()) {
4190 return;
4191 }
4192 nmm->set_memory_at(ni, result);
4193 }
4194 }
4195 igvn->hash_insert(nmm);
4196 record_for_optimizer(nmm);
4197 }
4198
4199 // Phase 4: Update the inputs of non-instance memory Phis and
4200 // the Memory input of memnodes.
4201 // First update the inputs of any non-instance Phis from
4202 // which we split out an instance Phi. Note we don't have
4203 // to recursively process Phis encountered on the input memory
4204 // chains as is done in split_memory_phi() since they will
4205 // also be processed here.
4206 for (int j = 0; j < orig_phis.length(); j++) {
4207 PhiNode *phi = orig_phis.at(j);
4208 int alias_idx = _compile->get_alias_index(phi->adr_type());
4209 igvn->hash_delete(phi);
4210 for (uint i = 1; i < phi->req(); i++) {
4211 Node *mem = phi->in(i);
4212 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4213 if (_compile->failing()) {
4214 return;
4215 }
4216 if (mem != new_mem) {
4217 phi->set_req(i, new_mem);
4218 }
4219 }
4220 igvn->hash_insert(phi);
4221 record_for_optimizer(phi);
4222 }
4223
4224 // Update the memory inputs of MemNodes with the value we computed