
src/hotspot/share/opto/escape.cpp

  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/c2compiler.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_G1GC
  43 #include "gc/g1/g1ThreadLocalData.hpp"
  44 #endif // INCLUDE_G1GC
  45 #if INCLUDE_ZGC
  46 #include "gc/z/c2/zBarrierSetC2.hpp"
  47 #endif



  48 
  49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  50   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  51   _in_worklist(C->comp_arena()),
  52   _next_pidx(0),
  53   _collecting(true),
  54   _verify(false),
  55   _compile(C),
  56   _igvn(igvn),
  57   _node_map(C->comp_arena()) {
  58   // Add unknown java object.
  59   add_java_object(C->top(), PointsToNode::GlobalEscape);
  60   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
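     // phantom_obj stands in for the unknown object: any pointer whose
     // points-to set includes it is conservatively treated as escaping.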
  61   // Add ConP(#NULL) and ConN(#NULL) nodes.
  62   Node* oop_null = igvn->zerocon(T_OBJECT);
  63   assert(oop_null->_idx < nodes_size(), "should be created already");
  64   add_java_object(oop_null, PointsToNode::NoEscape);
  65   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  66   if (UseCompressedOops) {
  67     Node* noop_null = igvn->zerocon(T_NARROWOOP);


 537               && adr->in(AddPNode::Address)->is_Proj()
 538               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 539         delayed_worklist->push(n); // Process it later.
 540 #ifdef ASSERT
 541         assert(adr->is_AddP(), "expecting an AddP");
 542         if (adr_type == TypeRawPtr::NOTNULL) {
 543           // Verify a raw address for a store captured by Initialize node.
 544           int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 545           assert(offs != Type::OffsetBot, "offset must be a constant");
 546         }
 547 #endif
 548       } else {
 549         // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
 550         if (adr->is_BoxLock())
 551           break;
 552         // Stored value escapes in unsafe access.
 553         if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 554           // Pointer stores in G1 barriers look like unsafe accesses.
 555           // Ignore such stores so that non-escaping allocations can
 556           // still be scalar replaced.
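               // The filtered store has roughly this shape:
               //   StoreP(AddP(LoadP(AddP(ThreadLocal, buffer_offset)), index), pre_val)
               // i.e. the pre-value is written into the thread-local SATB queue buffer.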
 557 #if INCLUDE_G1GC
 558           if (UseG1GC && adr->is_AddP()) {
 559             Node* base = get_addp_base(adr);
 560             if (base->Opcode() == Op_LoadP &&
 561                 base->in(MemNode::Address)->is_AddP()) {
 562               adr = base->in(MemNode::Address);
 563               Node* tls = get_addp_base(adr);
 564               if (tls->Opcode() == Op_ThreadLocal) {
 565                 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 566                 if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
 567                   break; // G1 pre barrier previous oop value store.
 568                 }
 569                 if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
 570                   break; // G1 post barrier card address store.
 571                 }
 572               }
 573             }
 574           }
 575 #endif
 576           delayed_worklist->push(n); // Process unsafe access later.
 577           break;
 578         }
 579 #ifdef ASSERT
 580         n->dump(1);
 581         assert(false, "not unsafe or G1 barrier raw StoreP");
 582 #endif
 583       }
 584       break;
 585     }
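         // The string intrinsics below use their char[]/byte[] arguments
         // without storing them anywhere, so the arguments are only ArgEscape
         // (see the matching cases in add_final_edges() below).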
 586     case Op_AryEq:
 587     case Op_HasNegatives:
 588     case Op_StrComp:
 589     case Op_StrEquals:
 590     case Op_StrIndexOf:
 591     case Op_StrIndexOfChar:
 592     case Op_StrInflatedCopy:
 593     case Op_StrCompressedCopy:
 594     case Op_EncodeISOArray: {
 595       add_local_var(n, PointsToNode::ArgEscape);
 596       delayed_worklist->push(n); // Process it later.
 597       break;
 598     }
 599     case Op_ThreadLocal: {
 600       add_java_object(n, PointsToNode::ArgEscape);
 601       break;
 602     }
 603     default:
 604       ; // Do nothing for nodes not related to EA.
 605   }
 606   return;
 607 }
 608 
 609 #ifdef ASSERT
 610 #define ELSE_FAIL(name)                               \
 611       /* Should not be called for non-pointer types. */   \
 612       n->dump(1);                                       \
 613       assert(false, name);                              \
 614       break;
 615 #else
 616 #define ELSE_FAIL(name) \
 617       break;
 618 #endif
 619 
 620 // Add final simple edges to graph.
 621 void ConnectionGraph::add_final_edges(Node *n) {
 622   PointsToNode* n_ptn = ptnode_adr(n->_idx);


 798     case Op_EncodeISOArray: {
 799       // char[]/byte[] arrays passed to string intrinsics do not escape but
 800       // they are not scalar replaceable. Adjust escape state for them.
 801       // Start from in(2) edge since in(1) is memory edge.
 802       for (uint i = 2; i < n->req(); i++) {
 803         Node* adr = n->in(i);
 804         const Type* at = _igvn->type(adr);
 805         if (!adr->is_top() && at->isa_ptr()) {
 806           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
 807                  at->isa_ptr() != NULL, "expecting a pointer");
 808           if (adr->is_AddP()) {
 809             adr = get_addp_base(adr);
 810           }
 811           PointsToNode* ptn = ptnode_adr(adr->_idx);
 812           assert(ptn != NULL, "node should be registered");
 813           add_edge(n_ptn, ptn);
 814         }
 815       }
 816       break;
 817     }
 818     default: {
 819       // This method should be called only for EA-specific nodes that may
 820       // be missing some edges from when they were created.
 821 #ifdef ASSERT
 822       n->dump(1);
 823 #endif
 824       guarantee(false, "unknown node");
 825     }
 826   }
 827   return;
 828 }
 829 
 830 void ConnectionGraph::add_call_node(CallNode* call) {
 831   assert(call->returns_pointer(), "only for call which returns pointer");
 832   uint call_idx = call->_idx;
 833   if (call->is_Allocate()) {
 834     Node* k = call->in(AllocateNode::KlassNode);
 835     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 836     assert(kt != NULL, "TypeKlassPtr required.");
 837     ciKlass* cik = kt->klass();


2354   //     AddP  ( base == top )
2355   //
2356   Node *base = addp->in(AddPNode::Base);
2357   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2358     base = addp->in(AddPNode::Address);
2359     while (base->is_AddP()) {
2360       // Case #6 (unsafe access) may have several chained AddP nodes.
2361       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2362       base = base->in(AddPNode::Address);
2363     }
2364     if (base->Opcode() == Op_CheckCastPP &&
2365         base->bottom_type()->isa_rawptr() &&
2366         _igvn->type(base->in(1))->isa_oopptr()) {
2367       base = base->in(1); // Case #9
2368     } else {
2369       Node* uncast_base = base->uncast();
2370       int opcode = uncast_base->Opcode();
2371       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2372              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2373              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2374              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2375     }
2376   }
2377   return base;
2378 }
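     // For example (the unsafe-access case #6 above), the address chain may be
     //   CastX2P <- AddP (base == top) <- AddP (base == top)
     // and the loop above walks the Address inputs down to the CastX2P.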
2379 
2380 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2381   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2382   Node* addp2 = addp->raw_out(0);
2383   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2384       addp2->in(AddPNode::Base) == n &&
2385       addp2->in(AddPNode::Address) == addp) {
2386     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2387     //
2388     // Find the array's offset to push it on the worklist first, so
2389     // that the array's element offset (pushed second) is processed
2390     // first; this avoids a CastPP for the array's offset.
2391     // Otherwise the inserted CastPP (LocalVar) would point to what
2392     // the AddP (Field) points to, which would be wrong since the
2393     // algorithm expects the CastPP to have the same points-to info
2394     // as the AddP's base CheckCastPP (LocalVar).


3087           }
3088         }
3089       }
3090     } else if (n->is_AddP()) {
3091       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3092       if (jobj == NULL || jobj == phantom_obj) {
3093 #ifdef ASSERT
3094         ptnode_adr(get_addp_base(n)->_idx)->dump();
3095         ptnode_adr(n->_idx)->dump();
3096         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3097 #endif
3098         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3099         return;
3100       }
3101       Node *base = get_map(jobj->idx());  // CheckCastPP node
3102       if (!split_AddP(n, base)) continue; // wrong type from dead path
3103     } else if (n->is_Phi() ||
3104                n->is_CheckCastPP() ||
3105                n->is_EncodeP() ||
3106                n->is_DecodeN() ||
3107                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3108       if (visited.test_set(n->_idx)) {
3109         assert(n->is_Phi(), "loops only through Phi's");
3110         continue;  // already processed
3111       }
3112       JavaObjectNode* jobj = unique_java_object(n);
3113       if (jobj == NULL || jobj == phantom_obj) {
3114 #ifdef ASSERT
3115         ptnode_adr(n->_idx)->dump();
3116         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3117 #endif
3118         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3119         return;
3120       } else {
3121         Node *val = get_map(jobj->idx());   // CheckCastPP node
3122         TypeNode *tn = n->as_Type();
3123         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3124         assert(tinst != NULL && tinst->is_known_instance() &&
3125                tinst->instance_id() == jobj->idx(), "instance type expected.");
3126 


3157     // push allocation's users on appropriate worklist
3158     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3159       Node *use = n->fast_out(i);
3160       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3161         // Load/store to instance's field
3162         memnode_worklist.append_if_missing(use);
3163       } else if (use->is_MemBar()) {
3164         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3165           memnode_worklist.append_if_missing(use);
3166         }
3167       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3168         Node* addp2 = find_second_addp(use, n);
3169         if (addp2 != NULL) {
3170           alloc_worklist.append_if_missing(addp2);
3171         }
3172         alloc_worklist.append_if_missing(use);
3173       } else if (use->is_Phi() ||
3174                  use->is_CheckCastPP() ||
3175                  use->is_EncodeNarrowPtr() ||
3176                  use->is_DecodeNarrowPtr() ||
3177                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3178         alloc_worklist.append_if_missing(use);
3179 #ifdef ASSERT
3180       } else if (use->is_Mem()) {
3181         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3182       } else if (use->is_MergeMem()) {
3183         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3184       } else if (use->is_SafePoint()) {
3185         // Look for MergeMem nodes for calls which reference unique allocation
3186         // (through CheckCastPP nodes) even for debug info.
3187         Node* m = use->in(TypeFunc::Memory);
3188         if (m->is_MergeMem()) {
3189           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3190         }
3191       } else if (use->Opcode() == Op_EncodeISOArray) {
3192         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3193           // EncodeISOArray overwrites destination array
3194           memnode_worklist.append_if_missing(use);
3195         }
3196       } else {
3197         uint op = use->Opcode();
3198         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3199             (use->in(MemNode::Memory) == n)) {
3200           // They overwrite the memory edge corresponding to the destination array.
3201           memnode_worklist.append_if_missing(use);
3202         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3203               op == Op_CastP2X || op == Op_StoreCM ||
3204               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3205               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3206               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3207               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3208           n->dump();
3209           use->dump();
3210           assert(false, "EA: missing allocation reference path");
3211         }
3212 #endif
3213       }
3214     }
3215 
3216   }
3217 
3218   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3219   // type, record it in the ArrayCopy node so we know what memory this
3220   // node uses/modifies.
3221   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3222     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3223     Node* dest = ac->in(ArrayCopyNode::Dest);
3224     if (dest->is_AddP()) {
3225       dest = get_addp_base(dest);
3226     }


--- old/src/hotspot/share/opto/escape.cpp
+++ new/src/hotspot/share/opto/escape.cpp

  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/c2compiler.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_G1GC
  43 #include "gc/g1/g1ThreadLocalData.hpp"
  44 #endif // INCLUDE_G1GC
  45 #if INCLUDE_ZGC
  46 #include "gc/z/c2/zBarrierSetC2.hpp"
  47 #endif
  48 #if INCLUDE_SHENANDOAHGC
  49 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
     #include "gc/shenandoah/shenandoahThreadLocalData.hpp" // for the ShenandoahThreadLocalData offsets used below
  50 #endif
  51 
  52 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  53   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  54   _in_worklist(C->comp_arena()),
  55   _next_pidx(0),
  56   _collecting(true),
  57   _verify(false),
  58   _compile(C),
  59   _igvn(igvn),
  60   _node_map(C->comp_arena()) {
  61   // Add unknown java object.
  62   add_java_object(C->top(), PointsToNode::GlobalEscape);
  63   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
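     // phantom_obj stands in for the unknown object: any pointer whose
     // points-to set includes it is conservatively treated as escaping.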
  64   // Add ConP(#NULL) and ConN(#NULL) nodes.
  65   Node* oop_null = igvn->zerocon(T_OBJECT);
  66   assert(oop_null->_idx < nodes_size(), "should be created already");
  67   add_java_object(oop_null, PointsToNode::NoEscape);
  68   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  69   if (UseCompressedOops) {
  70     Node* noop_null = igvn->zerocon(T_NARROWOOP);


 540               && adr->in(AddPNode::Address)->is_Proj()
 541               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 542         delayed_worklist->push(n); // Process it later.
 543 #ifdef ASSERT
 544         assert(adr->is_AddP(), "expecting an AddP");
 545         if (adr_type == TypeRawPtr::NOTNULL) {
 546           // Verify a raw address for a store captured by Initialize node.
 547           int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 548           assert(offs != Type::OffsetBot, "offset must be a constant");
 549         }
 550 #endif
 551       } else {
 552         // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
 553         if (adr->is_BoxLock())
 554           break;
 555         // Stored value escapes in unsafe access.
 556         if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 557           // Pointer stores in G1 barriers look like unsafe accesses.
 558           // Ignore such stores so that non-escaping allocations can
 559           // still be scalar replaced.
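               // The filtered store has roughly this shape:
               //   StoreP(AddP(LoadP(AddP(ThreadLocal, buffer_offset)), index), pre_val)
               // i.e. the pre-value is written into the thread-local SATB queue buffer.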
 560 #if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
 561           if ((UseG1GC || UseShenandoahGC) && adr->is_AddP()) {
 562             Node* base = get_addp_base(adr);
 563             if (base->Opcode() == Op_LoadP &&
 564                 base->in(MemNode::Address)->is_AddP()) {
 565               adr = base->in(MemNode::Address);
 566               Node* tls = get_addp_base(adr);
 567               if (tls->Opcode() == Op_ThreadLocal) {
 568                 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 569 #if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
 570                 const int buf_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_buffer_offset()
 571                                                         : ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
 572 #elif INCLUDE_G1GC
 573                 const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
 574 #else
 575                 const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
 576 #endif
 577                 if (offs == buf_offset) {
 578                   break; // G1/Shenandoah pre barrier previous oop value store.
 579                 }
     #if INCLUDE_G1GC
                     // Guarded: G1ThreadLocalData is not available in a Shenandoah-only build.
 580                 if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
 581                   break; // G1 post barrier card address store.
 582                 }
     #endif
 583               }
 584             }
 585           }
 586 #endif
 587           delayed_worklist->push(n); // Process unsafe access later.
 588           break;
 589         }
 590 #ifdef ASSERT
 591         n->dump(1);
 592         assert(false, "not unsafe or GC barrier raw StoreP");
 593 #endif
 594       }
 595       break;
 596     }
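         // The string intrinsics below use their char[]/byte[] arguments
         // without storing them anywhere, so the arguments are only ArgEscape
         // (see the matching cases in add_final_edges() below).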
 597     case Op_AryEq:
 598     case Op_HasNegatives:
 599     case Op_StrComp:
 600     case Op_StrEquals:
 601     case Op_StrIndexOf:
 602     case Op_StrIndexOfChar:
 603     case Op_StrInflatedCopy:
 604     case Op_StrCompressedCopy:
 605     case Op_EncodeISOArray: {
 606       add_local_var(n, PointsToNode::ArgEscape);
 607       delayed_worklist->push(n); // Process it later.
 608       break;
 609     }
 610     case Op_ThreadLocal: {
 611       add_java_object(n, PointsToNode::ArgEscape);
 612       break;
 613     }
 614 #if INCLUDE_SHENANDOAHGC
 615     case Op_ShenandoahReadBarrier:
 616     case Op_ShenandoahWriteBarrier:
 617       // Barriers 'pass through' their argument: what goes in comes out.
 618       // The value itself doesn't escape through the barrier.
 619       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), delayed_worklist);
 620       break;
 621     case Op_ShenandoahEnqueueBarrier:
 622       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 623       break;
 624 #endif
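         // The Shenandoah cases above mirror those in add_final_edges() below:
         // here processing is deferred via the delayed worklist, while there the
         // edge is added directly (NULL worklist) once all nodes are registered.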
 625     default:
 626       ; // Do nothing for nodes not related to EA.
 627   }
 628   return;
 629 }
 630 
 631 #ifdef ASSERT
 632 #define ELSE_FAIL(name)                               \
 633       /* Should not be called for not pointer type. */  \
 634       n->dump(1);                                       \
 635       assert(false, name);                              \
 636       break;
 637 #else
 638 #define ELSE_FAIL(name) \
 639       break;
 640 #endif
 641 
 642 // Add final simple edges to graph.
 643 void ConnectionGraph::add_final_edges(Node *n) {
 644   PointsToNode* n_ptn = ptnode_adr(n->_idx);


 820     case Op_EncodeISOArray: {
 821       // char[]/byte[] arrays passed to string intrinsics do not escape but
 822       // they are not scalar replaceable. Adjust escape state for them.
 823       // Start from in(2) edge since in(1) is memory edge.
 824       for (uint i = 2; i < n->req(); i++) {
 825         Node* adr = n->in(i);
 826         const Type* at = _igvn->type(adr);
 827         if (!adr->is_top() && at->isa_ptr()) {
 828           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
 829                  at->isa_ptr() != NULL, "expecting a pointer");
 830           if (adr->is_AddP()) {
 831             adr = get_addp_base(adr);
 832           }
 833           PointsToNode* ptn = ptnode_adr(adr->_idx);
 834           assert(ptn != NULL, "node should be registered");
 835           add_edge(n_ptn, ptn);
 836         }
 837       }
 838       break;
 839     }
 840 #if INCLUDE_SHENANDOAHGC
 841     case Op_ShenandoahReadBarrier:
 842     case Op_ShenandoahWriteBarrier:
 843       // Barriers 'pass through' their argument: what goes in comes out.
 844       // The value itself doesn't escape through the barrier.
 845       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), NULL);
 846       break;
 847     case Op_ShenandoahEnqueueBarrier:
 848       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 849       break;
 850 #endif
 851     default: {
 852       // This method should be called only for EA-specific nodes that may
 853       // be missing some edges from when they were created.
 854 #ifdef ASSERT
 855       n->dump(1);
 856 #endif
 857       guarantee(false, "unknown node");
 858     }
 859   }
 860   return;
 861 }
 862 
 863 void ConnectionGraph::add_call_node(CallNode* call) {
 864   assert(call->returns_pointer(), "only for call which returns pointer");
 865   uint call_idx = call->_idx;
 866   if (call->is_Allocate()) {
 867     Node* k = call->in(AllocateNode::KlassNode);
 868     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 869     assert(kt != NULL, "TypeKlassPtr required.");
 870     ciKlass* cik = kt->klass();


2387   //     AddP  ( base == top )
2388   //
2389   Node *base = addp->in(AddPNode::Base);
2390   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2391     base = addp->in(AddPNode::Address);
2392     while (base->is_AddP()) {
2393       // Case #6 (unsafe access) may have several chained AddP nodes.
2394       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2395       base = base->in(AddPNode::Address);
2396     }
2397     if (base->Opcode() == Op_CheckCastPP &&
2398         base->bottom_type()->isa_rawptr() &&
2399         _igvn->type(base->in(1))->isa_oopptr()) {
2400       base = base->in(1); // Case #9
2401     } else {
2402       Node* uncast_base = base->uncast();
2403       int opcode = uncast_base->Opcode();
2404       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2405              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2406              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2407              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
2408              uncast_base->is_ShenandoahBarrier(), "sanity");
2409     }
2410   }
2411   return base;
2412 }
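     // For example (the unsafe-access case #6 above), the address chain may be
     //   CastX2P <- AddP (base == top) <- AddP (base == top)
     // and the loop above walks the Address inputs down to the CastX2P.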
2413 
2414 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2415   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2416   Node* addp2 = addp->raw_out(0);
2417   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2418       addp2->in(AddPNode::Base) == n &&
2419       addp2->in(AddPNode::Address) == addp) {
2420     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2421     //
2422     // Find the array's offset to push it on the worklist first, so
2423     // that the array's element offset (pushed second) is processed
2424     // first; this avoids a CastPP for the array's offset.
2425     // Otherwise the inserted CastPP (LocalVar) would point to what
2426     // the AddP (Field) points to, which would be wrong since the
2427     // algorithm expects the CastPP to have the same points-to info
2428     // as the AddP's base CheckCastPP (LocalVar).


3121           }
3122         }
3123       }
3124     } else if (n->is_AddP()) {
3125       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3126       if (jobj == NULL || jobj == phantom_obj) {
3127 #ifdef ASSERT
3128         ptnode_adr(get_addp_base(n)->_idx)->dump();
3129         ptnode_adr(n->_idx)->dump();
3130         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3131 #endif
3132         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3133         return;
3134       }
3135       Node *base = get_map(jobj->idx());  // CheckCastPP node
3136       if (!split_AddP(n, base)) continue; // wrong type from dead path
3137     } else if (n->is_Phi() ||
3138                n->is_CheckCastPP() ||
3139                n->is_EncodeP() ||
3140                n->is_DecodeN() ||
3141                n->is_ShenandoahBarrier() ||
3142                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3143       if (visited.test_set(n->_idx)) {
3144         assert(n->is_Phi(), "loops only through Phi's");
3145         continue;  // already processed
3146       }
3147       JavaObjectNode* jobj = unique_java_object(n);
3148       if (jobj == NULL || jobj == phantom_obj) {
3149 #ifdef ASSERT
3150         ptnode_adr(n->_idx)->dump();
3151         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3152 #endif
3153         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3154         return;
3155       } else {
3156         Node *val = get_map(jobj->idx());   // CheckCastPP node
3157         TypeNode *tn = n->as_Type();
3158         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3159         assert(tinst != NULL && tinst->is_known_instance() &&
3160                tinst->instance_id() == jobj->idx(), "instance type expected.");
3161 


3192     // push allocation's users on appropriate worklist
3193     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3194       Node *use = n->fast_out(i);
3195       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3196         // Load/store to instance's field
3197         memnode_worklist.append_if_missing(use);
3198       } else if (use->is_MemBar()) {
3199         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3200           memnode_worklist.append_if_missing(use);
3201         }
3202       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3203         Node* addp2 = find_second_addp(use, n);
3204         if (addp2 != NULL) {
3205           alloc_worklist.append_if_missing(addp2);
3206         }
3207         alloc_worklist.append_if_missing(use);
3208       } else if (use->is_Phi() ||
3209                  use->is_CheckCastPP() ||
3210                  use->is_EncodeNarrowPtr() ||
3211                  use->is_DecodeNarrowPtr() ||
3212                  use->is_ShenandoahBarrier() ||
3213                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3214         alloc_worklist.append_if_missing(use);
3215 #ifdef ASSERT
3216       } else if (use->is_Mem()) {
3217         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3218       } else if (use->is_MergeMem()) {
3219         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3220       } else if (use->is_SafePoint()) {
3221         // Look for MergeMem nodes for calls which reference unique allocation
3222         // (through CheckCastPP nodes) even for debug info.
3223         Node* m = use->in(TypeFunc::Memory);
3224         if (m->is_MergeMem()) {
3225           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3226         }
3227       } else if (use->Opcode() == Op_EncodeISOArray) {
3228         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3229           // EncodeISOArray overwrites destination array
3230           memnode_worklist.append_if_missing(use);
3231         }
3232       } else {
3233         uint op = use->Opcode();
3234         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3235             (use->in(MemNode::Memory) == n)) {
3236           // They overwrite the memory edge corresponding to the destination array.
3237           memnode_worklist.append_if_missing(use);
3238         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3239               op == Op_CastP2X || op == Op_StoreCM ||
3240               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3241               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3242               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3243               op == Op_ShenandoahWBMemProj ||
3244               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3245           n->dump();
3246           use->dump();
3247           assert(false, "EA: missing allocation reference path");
3248         }
3249 #endif
3250       }
3251     }
3252 
3253   }
3254 
3255   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3256   // type, record it in the ArrayCopy node so we know what memory this
3257   // node uses/modifies.
3258   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3259     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3260     Node* dest = ac->in(ArrayCopyNode::Dest);
3261     if (dest->is_AddP()) {
3262       dest = get_addp_base(dest);
3263     }

