< prev index next >

src/hotspot/share/opto/escape.cpp

Print this page

1542       assert((ptnode_adr(adr->_idx) == nullptr ||
1543               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1544     }
1545 #endif
1546     add_local_var_and_edge(n, PointsToNode::NoEscape,
1547                            adr, delayed_worklist);
1548   }
1549 }
1550 
1551 // Populate Connection Graph with PointsTo nodes and create simple
1552 // connection graph edges.
1553 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1554   assert(!_verify, "this method should not be called for verification");
1555   PhaseGVN* igvn = _igvn;
1556   uint n_idx = n->_idx;
1557   PointsToNode* n_ptn = ptnode_adr(n_idx);
1558   if (n_ptn != nullptr) {
1559     return; // No need to redefine PointsTo node during first iteration.
1560   }
1561   int opcode = n->Opcode();
1562   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1563   if (gc_handled) {
1564     return; // Ignore node if already handled by GC.
1565   }
1566 
1567   if (n->is_Call()) {
1568     // Arguments to allocation and locking don't escape.
1569     if (n->is_AbstractLock()) {
1570       // Put Lock and Unlock nodes on IGVN worklist to process them during
1571       // first IGVN optimization when escape information is still available.
1572       record_for_optimizer(n);
1573     } else if (n->is_Allocate()) {
1574       add_call_node(n->as_Call());
1575       record_for_optimizer(n);
1576     } else {
1577       if (n->is_CallStaticJava()) {
1578         const char* name = n->as_CallStaticJava()->_name;
1579         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1580           return; // Skip uncommon traps
1581         }
1582       }
1583       // Don't mark as processed since call's arguments have to be processed.
1584       delayed_worklist->push(n);
1585       // Check if a call returns an object.
1586       if ((n->as_Call()->returns_pointer() &&

1764   }
1765   return;
1766 }
1767 
1768 // Add final simple edges to graph.
1769 void ConnectionGraph::add_final_edges(Node *n) {
1770   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1771 #ifdef ASSERT
1772   if (_verify && n_ptn->is_JavaObject())
1773     return; // This method does not change graph for JavaObject.
1774 #endif
1775 
1776   if (n->is_Call()) {
1777     process_call_arguments(n->as_Call());
1778     return;
1779   }
1780   assert(n->is_Store() || n->is_LoadStore() ||
1781          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1782          "node should be registered already");
1783   int opcode = n->Opcode();
1784   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1785   if (gc_handled) {
1786     return; // Ignore node if already handled by GC.
1787   }
1788   switch (opcode) {
1789     case Op_AddP: {
1790       Node* base = get_addp_base(n);
1791       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1792       assert(ptn_base != nullptr, "field's base should be registered");
1793       add_base(n_ptn->as_Field(), ptn_base);
1794       break;
1795     }
1796     case Op_CastPP:
1797     case Op_CheckCastPP:
1798     case Op_EncodeP:
1799     case Op_DecodeN:
1800     case Op_EncodePKlass:
1801     case Op_DecodeNKlass: {
1802       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1803       break;
1804     }
1805     case Op_CMoveP: {
1806       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1807         Node* in = n->in(i);

2208           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2209                  aat->isa_ptr() != nullptr, "expecting an Ptr");
2210           bool arg_has_oops = aat->isa_oopptr() &&
2211                               (aat->isa_instptr() ||
2212                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2213           if (i == TypeFunc::Parms) {
2214             src_has_oops = arg_has_oops;
2215           }
2216           //
2217           // src or dst could be j.l.Object when other is basic type array:
2218           //
2219           //   arraycopy(char[],0,Object*,0,size);
2220           //   arraycopy(Object*,0,char[],0,size);
2221           //
2222           // Don't add edges in such cases.
2223           //
2224           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2225                                        arg_has_oops && (i > TypeFunc::Parms);
2226 #ifdef ASSERT
2227           if (!(is_arraycopy ||
2228                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2229                 (call->as_CallLeaf()->_name != nullptr &&
2230                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2231                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2232                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2233                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2234                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2235                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2236                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2239                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2240                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2241                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2242                   strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2243                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2244                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2245                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2246                   strcmp(call->as_CallLeaf()->_name, "kyberNtt") == 0 ||
2247                   strcmp(call->as_CallLeaf()->_name, "kyberInverseNtt") == 0 ||
2248                   strcmp(call->as_CallLeaf()->_name, "kyberNttMult") == 0 ||

3524       } else {
3525         const Type* elemtype = adr_type->isa_aryptr()->elem();
3526         bt = elemtype->array_element_basic_type();
3527       }
3528     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3529       // Allocation initialization, ThreadLocal field access, unsafe access
3530       if (has_oop_node_outs(n)) {
3531         bt = T_OBJECT;
3532       }
3533     }
3534   }
3535   // Note: T_NARROWOOP is not classed as a real reference type
3536   bool res = (is_reference_type(bt) || bt == T_NARROWOOP);
3537   assert(!has_oop_node_outs(n) || res, "sanity: AddP has oop outs, needs to be treated as oop field");
3538   return res;
3539 }
3540 
// Returns true if AddP node 'n' has at least one out (use) that treats the
// addressed memory as an oop: a plain or compressed-oop load/store
// (StoreP/LoadP/StoreN/LoadN), an atomic oop update (GetAndSet,
// CompareAndExchange, or strong/weak CompareAndSwap, in both P and N
// flavors), or a GC-specific unsafe object access recognized by the
// current barrier set (delegated to BarrierSetC2).
3541 bool ConnectionGraph::has_oop_node_outs(Node* n) {
3542   return n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3543          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3544          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3545          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n);
3546 }
3547 
3548 // Returns unique pointed java object or null.
3549 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3550   // If the node was created after the escape computation we can't answer.
3551   uint idx = n->_idx;
3552   if (idx >= nodes_size()) {
3553     return nullptr;
3554   }
3555   PointsToNode* ptn = ptnode_adr(idx);
3556   if (ptn == nullptr) {
3557     return nullptr;
3558   }
3559   if (ptn->is_JavaObject()) {
3560     return ptn->as_JavaObject();
3561   }
3562   assert(ptn->is_LocalVar(), "sanity");
3563   // Check all java objects it points to.
3564   JavaObjectNode* jobj = nullptr;
3565   for (EdgeIterator i(ptn); i.has_next(); i.next()) {

4205       } else if (proj_in->is_Call()) {
4206         // ArrayCopy node processed here as well
4207         CallNode *call = proj_in->as_Call();
4208         if (!call->may_modify(toop, igvn)) {
4209           result = call->in(TypeFunc::Memory);
4210         }
4211       } else if (proj_in->is_Initialize()) {
4212         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4213         // Stop if this is the initialization for the object instance
4214         // which contains this memory slice, otherwise skip over it.
4215         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4216           result = proj_in->in(TypeFunc::Memory);
4217         } else if (C->get_alias_index(result->adr_type()) != alias_idx) {
4218           assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
4219           result = get_map(result->_idx);
4220           assert(result != nullptr, "new projection should have been allocated");
4221           break;
4222         }
4223       } else if (proj_in->is_MemBar()) {
4224         // Check if there is an array copy for a clone
4225         // Step over GC barrier when ReduceInitialCardMarks is disabled
4226         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4227         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4228 
4229         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4230           // Stop if it is a clone
4231           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4232           if (ac->may_modify(toop, igvn)) {
4233             break;
4234           }
4235         }
4236         result = proj_in->in(TypeFunc::Memory);
4237       }
4238     } else if (result->is_MergeMem()) {
4239       MergeMemNode *mmem = result->as_MergeMem();
4240       result = step_through_mergemem(mmem, alias_idx, toop);
4241       if (result == mmem->base_memory()) {
4242         // Didn't find instance memory, search through general slice recursively.
4243         result = mmem->memory_at(C->get_general_index(alias_idx));
4244         result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1);
4245         if (C->failing()) {
4246           return nullptr;
4247         }

4694         }
4695       } else if (use->Opcode() == Op_EncodeISOArray) {
4696         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4697           // EncodeISOArray overwrites destination array
4698           memnode_worklist.append_if_missing(use);
4699         }
4700       } else {
4701         uint op = use->Opcode();
4702         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4703             (use->in(MemNode::Memory) == n)) {
4704           // They overwrite memory edge corresponding to destination array,
4705           memnode_worklist.append_if_missing(use);
4706         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4707               op == Op_CastP2X ||
4708               op == Op_FastLock || op == Op_AryEq ||
4709               op == Op_StrComp || op == Op_CountPositives ||
4710               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4711               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4712               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4713               op == Op_SubTypeCheck ||
4714               op == Op_ReinterpretS2HF ||
4715               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4716           n->dump();
4717           use->dump();
4718           assert(false, "EA: missing allocation reference path");
4719         }
4720 #endif
4721       }
4722     }
4723 
4724   }
4725 
4726 #ifdef ASSERT
4727   if (VerifyReduceAllocationMerges) {
4728     for (uint i = 0; i < reducible_merges.size(); i++) {
4729       Node* phi = reducible_merges.at(i);
4730 
4731       if (!reduced_merges.member(phi)) {
4732         phi->dump(2);
4733         phi->dump(-2);
4734         assert(false, "This reducible merge wasn't reduced.");
4735       }

4867         assert(n->is_Initialize(), "We only push projections of Initialize");
4868         if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
4869           memnode_worklist.append_if_missing(use);
4870         }
4871 #ifdef ASSERT
4872       } else if(use->is_Mem()) {
4873         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4874       } else if (use->is_MergeMem()) {
4875         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4876       } else if (use->Opcode() == Op_EncodeISOArray) {
4877         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4878           // EncodeISOArray overwrites destination array
4879           memnode_worklist.append_if_missing(use);
4880         }
4881       } else {
4882         uint op = use->Opcode();
4883         if ((use->in(MemNode::Memory) == n) &&
4884             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4885           // They overwrite memory edge corresponding to destination array,
4886           memnode_worklist.append_if_missing(use);
4887         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4888               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4889               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4890               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4891           n->dump();
4892           use->dump();
4893           assert(false, "EA: missing memory path");
4894         }
4895 #endif
4896       }
4897     }
4898   }
4899 
4900   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4901   //            Walk each memory slice moving the first node encountered of each
4902   //            instance type to the input corresponding to its alias index.
4903   uint length = mergemem_worklist.length();
4904   for( uint next = 0; next < length; ++next ) {
4905     MergeMemNode* nmm = mergemem_worklist.at(next);
4906     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4907     // Note: we don't want to use MergeMemStream here because we only want to
4908     // scan inputs which exist at the start, not ones we add during processing.

1542       assert((ptnode_adr(adr->_idx) == nullptr ||
1543               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1544     }
1545 #endif
1546     add_local_var_and_edge(n, PointsToNode::NoEscape,
1547                            adr, delayed_worklist);
1548   }
1549 }
1550 
1551 // Populate Connection Graph with PointsTo nodes and create simple
1552 // connection graph edges.
1553 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1554   assert(!_verify, "this method should not be called for verification");
1555   PhaseGVN* igvn = _igvn;
1556   uint n_idx = n->_idx;
1557   PointsToNode* n_ptn = ptnode_adr(n_idx);
1558   if (n_ptn != nullptr) {
1559     return; // No need to redefine PointsTo node during first iteration.
1560   }
1561   int opcode = n->Opcode();





1562   if (n->is_Call()) {
1563     // Arguments to allocation and locking don't escape.
1564     if (n->is_AbstractLock()) {
1565       // Put Lock and Unlock nodes on IGVN worklist to process them during
1566       // first IGVN optimization when escape information is still available.
1567       record_for_optimizer(n);
1568     } else if (n->is_Allocate()) {
1569       add_call_node(n->as_Call());
1570       record_for_optimizer(n);
1571     } else {
1572       if (n->is_CallStaticJava()) {
1573         const char* name = n->as_CallStaticJava()->_name;
1574         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1575           return; // Skip uncommon traps
1576         }
1577       }
1578       // Don't mark as processed since call's arguments have to be processed.
1579       delayed_worklist->push(n);
1580       // Check if a call returns an object.
1581       if ((n->as_Call()->returns_pointer() &&

1759   }
1760   return;
1761 }
1762 
1763 // Add final simple edges to graph.
1764 void ConnectionGraph::add_final_edges(Node *n) {
1765   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1766 #ifdef ASSERT
1767   if (_verify && n_ptn->is_JavaObject())
1768     return; // This method does not change graph for JavaObject.
1769 #endif
1770 
1771   if (n->is_Call()) {
1772     process_call_arguments(n->as_Call());
1773     return;
1774   }
1775   assert(n->is_Store() || n->is_LoadStore() ||
1776          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1777          "node should be registered already");
1778   int opcode = n->Opcode();




1779   switch (opcode) {
1780     case Op_AddP: {
1781       Node* base = get_addp_base(n);
1782       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1783       assert(ptn_base != nullptr, "field's base should be registered");
1784       add_base(n_ptn->as_Field(), ptn_base);
1785       break;
1786     }
1787     case Op_CastPP:
1788     case Op_CheckCastPP:
1789     case Op_EncodeP:
1790     case Op_DecodeN:
1791     case Op_EncodePKlass:
1792     case Op_DecodeNKlass: {
1793       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1794       break;
1795     }
1796     case Op_CMoveP: {
1797       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1798         Node* in = n->in(i);

2199           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2200                  aat->isa_ptr() != nullptr, "expecting an Ptr");
2201           bool arg_has_oops = aat->isa_oopptr() &&
2202                               (aat->isa_instptr() ||
2203                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2204           if (i == TypeFunc::Parms) {
2205             src_has_oops = arg_has_oops;
2206           }
2207           //
2208           // src or dst could be j.l.Object when other is basic type array:
2209           //
2210           //   arraycopy(char[],0,Object*,0,size);
2211           //   arraycopy(Object*,0,char[],0,size);
2212           //
2213           // Don't add edges in such cases.
2214           //
2215           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2216                                        arg_has_oops && (i > TypeFunc::Parms);
2217 #ifdef ASSERT
2218           if (!(is_arraycopy ||

2219                 (call->as_CallLeaf()->_name != nullptr &&
2220                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2221                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2222                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2223                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2224                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2225                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2226                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2227                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2228                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2229                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2230                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2231                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2232                   strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2233                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2234                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2235                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2236                   strcmp(call->as_CallLeaf()->_name, "kyberNtt") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "kyberInverseNtt") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "kyberNttMult") == 0 ||

3514       } else {
3515         const Type* elemtype = adr_type->isa_aryptr()->elem();
3516         bt = elemtype->array_element_basic_type();
3517       }
3518     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3519       // Allocation initialization, ThreadLocal field access, unsafe access
3520       if (has_oop_node_outs(n)) {
3521         bt = T_OBJECT;
3522       }
3523     }
3524   }
3525   // Note: T_NARROWOOP is not classed as a real reference type
3526   bool res = (is_reference_type(bt) || bt == T_NARROWOOP);
3527   assert(!has_oop_node_outs(n) || res, "sanity: AddP has oop outs, needs to be treated as oop field");
3528   return res;
3529 }
3530 
// Returns true if AddP node 'n' has at least one out (use) that treats the
// addressed memory as an oop: a plain or compressed-oop load/store
// (StoreP/LoadP/StoreN/LoadN), or an atomic oop update (GetAndSet,
// CompareAndExchange, or strong/weak CompareAndSwap, in both P and N
// flavors).
3531 bool ConnectionGraph::has_oop_node_outs(Node* n) {
3532   return n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3533          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3534          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN);

3535 }
3536 
3537 // Returns unique pointed java object or null.
3538 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3539   // If the node was created after the escape computation we can't answer.
3540   uint idx = n->_idx;
3541   if (idx >= nodes_size()) {
3542     return nullptr;
3543   }
3544   PointsToNode* ptn = ptnode_adr(idx);
3545   if (ptn == nullptr) {
3546     return nullptr;
3547   }
3548   if (ptn->is_JavaObject()) {
3549     return ptn->as_JavaObject();
3550   }
3551   assert(ptn->is_LocalVar(), "sanity");
3552   // Check all java objects it points to.
3553   JavaObjectNode* jobj = nullptr;
3554   for (EdgeIterator i(ptn); i.has_next(); i.next()) {

4194       } else if (proj_in->is_Call()) {
4195         // ArrayCopy node processed here as well
4196         CallNode *call = proj_in->as_Call();
4197         if (!call->may_modify(toop, igvn)) {
4198           result = call->in(TypeFunc::Memory);
4199         }
4200       } else if (proj_in->is_Initialize()) {
4201         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4202         // Stop if this is the initialization for the object instance
4203         // which contains this memory slice, otherwise skip over it.
4204         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4205           result = proj_in->in(TypeFunc::Memory);
4206         } else if (C->get_alias_index(result->adr_type()) != alias_idx) {
4207           assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
4208           result = get_map(result->_idx);
4209           assert(result != nullptr, "new projection should have been allocated");
4210           break;
4211         }
4212       } else if (proj_in->is_MemBar()) {
4213         // Check if there is an array copy for a clone
4214         Node* control_proj_ac = proj_in->in(0);


4215 
4216         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4217           // Stop if it is a clone
4218           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4219           if (ac->may_modify(toop, igvn)) {
4220             break;
4221           }
4222         }
4223         result = proj_in->in(TypeFunc::Memory);
4224       }
4225     } else if (result->is_MergeMem()) {
4226       MergeMemNode *mmem = result->as_MergeMem();
4227       result = step_through_mergemem(mmem, alias_idx, toop);
4228       if (result == mmem->base_memory()) {
4229         // Didn't find instance memory, search through general slice recursively.
4230         result = mmem->memory_at(C->get_general_index(alias_idx));
4231         result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1);
4232         if (C->failing()) {
4233           return nullptr;
4234         }

4681         }
4682       } else if (use->Opcode() == Op_EncodeISOArray) {
4683         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4684           // EncodeISOArray overwrites destination array
4685           memnode_worklist.append_if_missing(use);
4686         }
4687       } else {
4688         uint op = use->Opcode();
4689         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4690             (use->in(MemNode::Memory) == n)) {
4691           // They overwrite memory edge corresponding to destination array,
4692           memnode_worklist.append_if_missing(use);
4693         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4694               op == Op_CastP2X ||
4695               op == Op_FastLock || op == Op_AryEq ||
4696               op == Op_StrComp || op == Op_CountPositives ||
4697               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4698               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4699               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4700               op == Op_SubTypeCheck ||
4701               op == Op_ReinterpretS2HF)) {

4702           n->dump();
4703           use->dump();
4704           assert(false, "EA: missing allocation reference path");
4705         }
4706 #endif
4707       }
4708     }
4709 
4710   }
4711 
4712 #ifdef ASSERT
4713   if (VerifyReduceAllocationMerges) {
4714     for (uint i = 0; i < reducible_merges.size(); i++) {
4715       Node* phi = reducible_merges.at(i);
4716 
4717       if (!reduced_merges.member(phi)) {
4718         phi->dump(2);
4719         phi->dump(-2);
4720         assert(false, "This reducible merge wasn't reduced.");
4721       }

4853         assert(n->is_Initialize(), "We only push projections of Initialize");
4854         if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
4855           memnode_worklist.append_if_missing(use);
4856         }
4857 #ifdef ASSERT
4858       } else if(use->is_Mem()) {
4859         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4860       } else if (use->is_MergeMem()) {
4861         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4862       } else if (use->Opcode() == Op_EncodeISOArray) {
4863         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4864           // EncodeISOArray overwrites destination array
4865           memnode_worklist.append_if_missing(use);
4866         }
4867       } else {
4868         uint op = use->Opcode();
4869         if ((use->in(MemNode::Memory) == n) &&
4870             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4871           // They overwrite memory edge corresponding to destination array,
4872           memnode_worklist.append_if_missing(use);
4873         } else if (!(op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||

4874               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4875               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4876           n->dump();
4877           use->dump();
4878           assert(false, "EA: missing memory path");
4879         }
4880 #endif
4881       }
4882     }
4883   }
4884 
4885   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4886   //            Walk each memory slice moving the first node encountered of each
4887   //            instance type to the input corresponding to its alias index.
4888   uint length = mergemem_worklist.length();
4889   for( uint next = 0; next < length; ++next ) {
4890     MergeMemNode* nmm = mergemem_worklist.at(next);
4891     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4892     // Note: we don't want to use MergeMemStream here because we only want to
4893     // scan inputs which exist at the start, not ones we add during processing.
< prev index next >