< prev index next >

src/hotspot/share/opto/escape.cpp

Print this page




  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/c2compiler.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_G1GC
  43 #include "gc/g1/g1ThreadLocalData.hpp"
  44 #endif // INCLUDE_G1GC
  45 #if INCLUDE_ZGC
  46 #include "gc/z/c2/zBarrierSetC2.hpp"
  47 #endif



  48 
  49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  50   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  51   _in_worklist(C->comp_arena()),
  52   _next_pidx(0),
  53   _collecting(true),
  54   _verify(false),
  55   _compile(C),
  56   _igvn(igvn),
  57   _node_map(C->comp_arena()) {
  58   // Add unknown java object.
  59   add_java_object(C->top(), PointsToNode::GlobalEscape);
  60   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  61   // Add ConP(#NULL) and ConN(#NULL) nodes.
  62   Node* oop_null = igvn->zerocon(T_OBJECT);
  63   assert(oop_null->_idx < nodes_size(), "should be created already");
  64   add_java_object(oop_null, PointsToNode::NoEscape);
  65   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  66   if (UseCompressedOops) {
  67     Node* noop_null = igvn->zerocon(T_NARROWOOP);


 495       else if (UseZGC) {
 496         if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
 497           add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
 498         }
 499       }
 500 #endif
 501       break;
 502     }
 503     case Op_Rethrow: // Exception object escapes
 504     case Op_Return: {
 505       if (n->req() > TypeFunc::Parms &&
 506           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 507         // Treat Return value as LocalVar with GlobalEscape escape state.
 508         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 509                                n->in(TypeFunc::Parms), delayed_worklist);
 510       }
 511       break;
 512     }
 513     case Op_CompareAndExchangeP:
 514     case Op_CompareAndExchangeN:




 515     case Op_GetAndSetP:
 516     case Op_GetAndSetN: {
 517       add_objload_to_connection_graph(n, delayed_worklist);
 518       // fallthrough
 519     }
 520     case Op_StoreP:
 521     case Op_StoreN:
 522     case Op_StoreNKlass:
 523     case Op_StorePConditional:






 524     case Op_WeakCompareAndSwapP:
 525     case Op_WeakCompareAndSwapN:
 526     case Op_CompareAndSwapP:
 527     case Op_CompareAndSwapN: {
 528       Node* adr = n->in(MemNode::Address);
 529       const Type *adr_type = igvn->type(adr);
 530       adr_type = adr_type->make_ptr();
 531       if (adr_type == NULL) {
 532         break; // skip dead nodes
 533       }
 534       if (   adr_type->isa_oopptr()
 535           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 536               && adr_type == TypeRawPtr::NOTNULL
 537               && adr->in(AddPNode::Address)->is_Proj()
 538               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 539         delayed_worklist->push(n); // Process it later.
 540 #ifdef ASSERT
 541         assert(adr->is_AddP(), "expecting an AddP");
 542         if (adr_type == TypeRawPtr::NOTNULL) {
 543           // Verify a raw address for a store captured by Initialize node.
 544           int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 545           assert(offs != Type::OffsetBot, "offset must be a constant");
 546         }
 547 #endif
 548       } else {
 549         // Ignore copy the displaced header to the BoxNode (OSR compilation).
 550         if (adr->is_BoxLock())
 551           break;
 552         // Stored value escapes in unsafe access.
 553         if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 554           // Pointer stores in G1 barriers look like unsafe accesses.
 555           // Ignore such stores to be able to scalar-replace non-escaping
 556           // allocations.
 557 #if INCLUDE_G1GC
 558           if (UseG1GC && adr->is_AddP()) {
 559             Node* base = get_addp_base(adr);
 560             if (base->Opcode() == Op_LoadP &&
 561                 base->in(MemNode::Address)->is_AddP()) {
 562               adr = base->in(MemNode::Address);
 563               Node* tls = get_addp_base(adr);
 564               if (tls->Opcode() == Op_ThreadLocal) {
 565                 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 566                 if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {








 567                   break; // G1 pre barrier previous oop value store.
 568                 }
 569                 if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
 570                   break; // G1 post barrier card address store.
 571                 }
 572               }
 573             }
 574           }
 575 #endif
 576           delayed_worklist->push(n); // Process unsafe access later.
 577           break;
 578         }
 579 #ifdef ASSERT
 580         n->dump(1);
 581         assert(false, "not unsafe or G1 barrier raw StoreP");
 582 #endif
 583       }
 584       break;
 585     }
 586     case Op_AryEq:
 587     case Op_HasNegatives:
 588     case Op_StrComp:
 589     case Op_StrEquals:
 590     case Op_StrIndexOf:
 591     case Op_StrIndexOfChar:
 592     case Op_StrInflatedCopy:
 593     case Op_StrCompressedCopy:
 594     case Op_EncodeISOArray: {
 595       add_local_var(n, PointsToNode::ArgEscape);
 596       delayed_worklist->push(n); // Process it later.
 597       break;
 598     }
 599     case Op_ThreadLocal: {
 600       add_java_object(n, PointsToNode::ArgEscape);
 601       break;
 602     }







 603     default:
 604       ; // Do nothing for nodes not related to EA.
 605   }
 606   return;
 607 }
 608 
// ELSE_FAIL(name): used in add_final_edges() switch cases when the case's
// expected pointer pattern did not match. In debug (ASSERT) builds it dumps
// the offending node and asserts with the given message; in product builds
// it silently breaks out of the switch.
 609 #ifdef ASSERT
 610 #define ELSE_FAIL(name)                               \
 611       /* Should not be called for not pointer type. */  \
 612       n->dump(1);                                       \
 613       assert(false, name);                              \
 614       break;
 615 #else
 616 #define ELSE_FAIL(name) \
 617       break;
 618 #endif
 619 
 620 // Add final simple edges to graph.
 621 void ConnectionGraph::add_final_edges(Node *n) {
 622   PointsToNode* n_ptn = ptnode_adr(n->_idx);


 723     case Op_Return: {
 724       if (n->req() > TypeFunc::Parms &&
 725           _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 726         // Treat Return value as LocalVar with GlobalEscape escape state.
 727         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 728                                n->in(TypeFunc::Parms), NULL);
 729         break;
 730       }
 731       ELSE_FAIL("Op_Return");
 732     }
 733     case Op_StoreP:
 734     case Op_StoreN:
 735     case Op_StoreNKlass:
 736     case Op_StorePConditional:
 737     case Op_CompareAndExchangeP:
 738     case Op_CompareAndExchangeN:
 739     case Op_CompareAndSwapP:
 740     case Op_CompareAndSwapN:
 741     case Op_WeakCompareAndSwapP:
 742     case Op_WeakCompareAndSwapN:








 743     case Op_GetAndSetP:
 744     case Op_GetAndSetN: {
 745       Node* adr = n->in(MemNode::Address);
 746       const Type *adr_type = _igvn->type(adr);
 747       adr_type = adr_type->make_ptr();
 748 #ifdef ASSERT
 749       if (adr_type == NULL) {
 750         n->dump(1);
 751         assert(adr_type != NULL, "dead node should not be on list");
 752         break;
 753       }
 754 #endif
 755       if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||



 756           opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
 757         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 758       }
 759       if (   adr_type->isa_oopptr()
 760           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 761               && adr_type == TypeRawPtr::NOTNULL
 762               && adr->in(AddPNode::Address)->is_Proj()
 763               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 764         // Point Address to Value
 765         PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 766         assert(adr_ptn != NULL &&
 767                adr_ptn->as_Field()->is_oop(), "node should be registered");
 768         Node *val = n->in(MemNode::ValueIn);
 769         PointsToNode* ptn = ptnode_adr(val->_idx);
 770         assert(ptn != NULL, "node should be registered");
 771         add_edge(adr_ptn, ptn);
 772         break;
 773       } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 774         // Stored value escapes in unsafe access.
 775         Node *val = n->in(MemNode::ValueIn);


 798     case Op_EncodeISOArray: {
 799       // char[]/byte[] arrays passed to string intrinsic do not escape but
 800       // they are not scalar replaceable. Adjust escape state for them.
 801       // Start from in(2) edge since in(1) is memory edge.
 802       for (uint i = 2; i < n->req(); i++) {
 803         Node* adr = n->in(i);
 804         const Type* at = _igvn->type(adr);
 805         if (!adr->is_top() && at->isa_ptr()) {
 806           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
 807                  at->isa_ptr() != NULL, "expecting a pointer");
 808           if (adr->is_AddP()) {
 809             adr = get_addp_base(adr);
 810           }
 811           PointsToNode* ptn = ptnode_adr(adr->_idx);
 812           assert(ptn != NULL, "node should be registered");
 813           add_edge(n_ptn, ptn);
 814         }
 815       }
 816       break;
 817     }








 818     default: {
 819       // This method should be called only for EA specific nodes which may
 820       // miss some edges when they were created.
 821 #ifdef ASSERT
 822       n->dump(1);
 823 #endif
 824       guarantee(false, "unknown node");
 825     }
 826   }
 827   return;
 828 }
 829 
 830 void ConnectionGraph::add_call_node(CallNode* call) {
 831   assert(call->returns_pointer(), "only for call which returns pointer");
 832   uint call_idx = call->_idx;
 833   if (call->is_Allocate()) {
 834     Node* k = call->in(AllocateNode::KlassNode);
 835     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 836     assert(kt != NULL, "TypeKlassPtr  required.");
 837     ciKlass* cik = kt->klass();


2096   BasicType bt = T_INT;
2097   if (offset == Type::OffsetBot) {
2098     // Check only oop fields.
2099     if (!adr_type->isa_aryptr() ||
2100         (adr_type->isa_aryptr()->klass() == NULL) ||
2101          adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2102       // OffsetBot is used to reference array's element. Ignore first AddP.
2103       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2104         bt = T_OBJECT;
2105       }
2106     }
2107   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2108     if (adr_type->isa_instptr()) {
2109       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2110       if (field != NULL) {
2111         bt = field->layout_type();
2112       } else {
2113         // Check for unsafe oop field access
2114         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2115             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||




2116             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2117           bt = T_OBJECT;
2118           (*unsafe) = true;
2119         }
2120       }
2121     } else if (adr_type->isa_aryptr()) {
2122       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2123         // Ignore array length load.
2124       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2125         // Ignore first AddP.
2126       } else {
2127         const Type* elemtype = adr_type->isa_aryptr()->elem();
2128         bt = elemtype->array_element_basic_type();
2129       }
2130     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2131       // Allocation initialization, ThreadLocal field access, unsafe access
2132       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2133           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2134           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2135         bt = T_OBJECT;


2355   //     AddP  ( base == top )
2356   //
2357   Node *base = addp->in(AddPNode::Base);
2358   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2359     base = addp->in(AddPNode::Address);
2360     while (base->is_AddP()) {
2361       // Case #6 (unsafe access) may have several chained AddP nodes.
2362       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2363       base = base->in(AddPNode::Address);
2364     }
2365     if (base->Opcode() == Op_CheckCastPP &&
2366         base->bottom_type()->isa_rawptr() &&
2367         _igvn->type(base->in(1))->isa_oopptr()) {
2368       base = base->in(1); // Case #9
2369     } else {
2370       Node* uncast_base = base->uncast();
2371       int opcode = uncast_base->Opcode();
2372       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2373              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2374              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2375              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");

2376     }
2377   }
2378   return base;
2379 }
2380 
2381 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2382   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2383   Node* addp2 = addp->raw_out(0);
2384   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2385       addp2->in(AddPNode::Base) == n &&
2386       addp2->in(AddPNode::Address) == addp) {
2387     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2388     //
2389     // Find array's offset to push it on worklist first and
2390     // as result process an array's element offset first (pushed second)
2391     // to avoid CastPP for the array's offset.
2392     // Otherwise the inserted CastPP (LocalVar) will point to what
2393     // the AddP (Field) points to. Which would be wrong since
 2394     // the algorithm expects the CastPP has the same point as
 2395     // AddP's base CheckCastPP (LocalVar).


3093           }
3094         }
3095       }
3096     } else if (n->is_AddP()) {
3097       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3098       if (jobj == NULL || jobj == phantom_obj) {
3099 #ifdef ASSERT
3100         ptnode_adr(get_addp_base(n)->_idx)->dump();
3101         ptnode_adr(n->_idx)->dump();
3102         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3103 #endif
3104         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3105         return;
3106       }
3107       Node *base = get_map(jobj->idx());  // CheckCastPP node
3108       if (!split_AddP(n, base)) continue; // wrong type from dead path
3109     } else if (n->is_Phi() ||
3110                n->is_CheckCastPP() ||
3111                n->is_EncodeP() ||
3112                n->is_DecodeN() ||

3113                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3114       if (visited.test_set(n->_idx)) {
3115         assert(n->is_Phi(), "loops only through Phi's");
3116         continue;  // already processed
3117       }
3118       JavaObjectNode* jobj = unique_java_object(n);
3119       if (jobj == NULL || jobj == phantom_obj) {
3120 #ifdef ASSERT
3121         ptnode_adr(n->_idx)->dump();
3122         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3123 #endif
3124         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3125         return;
3126       } else {
3127         Node *val = get_map(jobj->idx());   // CheckCastPP node
3128         TypeNode *tn = n->as_Type();
3129         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3130         assert(tinst != NULL && tinst->is_known_instance() &&
3131                tinst->instance_id() == jobj->idx() , "instance type expected.");
3132 


3163     // push allocation's users on appropriate worklist
3164     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3165       Node *use = n->fast_out(i);
3166       if(use->is_Mem() && use->in(MemNode::Address) == n) {
3167         // Load/store to instance's field
3168         memnode_worklist.append_if_missing(use);
3169       } else if (use->is_MemBar()) {
3170         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3171           memnode_worklist.append_if_missing(use);
3172         }
3173       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3174         Node* addp2 = find_second_addp(use, n);
3175         if (addp2 != NULL) {
3176           alloc_worklist.append_if_missing(addp2);
3177         }
3178         alloc_worklist.append_if_missing(use);
3179       } else if (use->is_Phi() ||
3180                  use->is_CheckCastPP() ||
3181                  use->is_EncodeNarrowPtr() ||
3182                  use->is_DecodeNarrowPtr() ||

3183                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3184         alloc_worklist.append_if_missing(use);
3185 #ifdef ASSERT
3186       } else if (use->is_Mem()) {
3187         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3188       } else if (use->is_MergeMem()) {
3189         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3190       } else if (use->is_SafePoint()) {
3191         // Look for MergeMem nodes for calls which reference unique allocation
3192         // (through CheckCastPP nodes) even for debug info.
3193         Node* m = use->in(TypeFunc::Memory);
3194         if (m->is_MergeMem()) {
3195           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3196         }
3197       } else if (use->Opcode() == Op_EncodeISOArray) {
3198         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3199           // EncodeISOArray overwrites destination array
3200           memnode_worklist.append_if_missing(use);
3201         }
3202       } else {




  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/c2compiler.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_G1GC
  43 #include "gc/g1/g1ThreadLocalData.hpp"
  44 #endif // INCLUDE_G1GC
  45 #if INCLUDE_ZGC
  46 #include "gc/z/c2/zBarrierSetC2.hpp"
  47 #endif
  48 #if INCLUDE_SHENANDOAHGC
  49 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  50 #endif
  51 
  52 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  53   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  54   _in_worklist(C->comp_arena()),
  55   _next_pidx(0),
  56   _collecting(true),
  57   _verify(false),
  58   _compile(C),
  59   _igvn(igvn),
  60   _node_map(C->comp_arena()) {
  61   // Add unknown java object.
  62   add_java_object(C->top(), PointsToNode::GlobalEscape);
  63   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  64   // Add ConP(#NULL) and ConN(#NULL) nodes.
  65   Node* oop_null = igvn->zerocon(T_OBJECT);
  66   assert(oop_null->_idx < nodes_size(), "should be created already");
  67   add_java_object(oop_null, PointsToNode::NoEscape);
  68   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  69   if (UseCompressedOops) {
  70     Node* noop_null = igvn->zerocon(T_NARROWOOP);


 498       else if (UseZGC) {
 499         if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
 500           add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
 501         }
 502       }
 503 #endif
 504       break;
 505     }
 506     case Op_Rethrow: // Exception object escapes
 507     case Op_Return: {
 508       if (n->req() > TypeFunc::Parms &&
 509           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 510         // Treat Return value as LocalVar with GlobalEscape escape state.
 511         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 512                                n->in(TypeFunc::Parms), delayed_worklist);
 513       }
 514       break;
 515     }
 516     case Op_CompareAndExchangeP:
 517     case Op_CompareAndExchangeN:
 518 #if INCLUDE_SHENANDOAHGC
 519     case Op_ShenandoahCompareAndExchangeP:
 520     case Op_ShenandoahCompareAndExchangeN:
 521 #endif
 522     case Op_GetAndSetP:
 523     case Op_GetAndSetN: {
 524       add_objload_to_connection_graph(n, delayed_worklist);
 525       // fallthrough
 526     }
 527     case Op_StoreP:
 528     case Op_StoreN:
 529     case Op_StoreNKlass:
 530     case Op_StorePConditional:
 531 #if INCLUDE_SHENANDOAHGC
 532     case Op_ShenandoahWeakCompareAndSwapP:
 533     case Op_ShenandoahWeakCompareAndSwapN:
 534     case Op_ShenandoahCompareAndSwapP:
 535     case Op_ShenandoahCompareAndSwapN:
 536 #endif
 537     case Op_WeakCompareAndSwapP:
 538     case Op_WeakCompareAndSwapN:
 539     case Op_CompareAndSwapP:
 540     case Op_CompareAndSwapN: {
 541       Node* adr = n->in(MemNode::Address);
 542       const Type *adr_type = igvn->type(adr);
 543       adr_type = adr_type->make_ptr();
 544       if (adr_type == NULL) {
 545         break; // skip dead nodes
 546       }
 547       if (   adr_type->isa_oopptr()
 548           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 549               && adr_type == TypeRawPtr::NOTNULL
 550               && adr->in(AddPNode::Address)->is_Proj()
 551               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 552         delayed_worklist->push(n); // Process it later.
 553 #ifdef ASSERT
 554         assert(adr->is_AddP(), "expecting an AddP");
 555         if (adr_type == TypeRawPtr::NOTNULL) {
 556           // Verify a raw address for a store captured by Initialize node.
 557           int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 558           assert(offs != Type::OffsetBot, "offset must be a constant");
 559         }
 560 #endif
 561       } else {
 562         // Ignore copy the displaced header to the BoxNode (OSR compilation).
 563         if (adr->is_BoxLock())
 564           break;
 565         // Stored value escapes in unsafe access.
 566         if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 567           // Pointer stores in G1 and Shenandoah barriers look like
 568           // unsafe accesses. Ignore such stores to be able to
 569           // scalar-replace non-escaping allocations.
 570 #if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
 571           if ((UseG1GC || UseShenandoahGC) && adr->is_AddP()) {
 572             Node* base = get_addp_base(adr);
 573             if (base->Opcode() == Op_LoadP &&
 574                 base->in(MemNode::Address)->is_AddP()) {
 575               adr = base->in(MemNode::Address);
 576               Node* tls = get_addp_base(adr);
 577               if (tls->Opcode() == Op_ThreadLocal) {
 578                 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 579 #if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
 580                 const int buf_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_buffer_offset()
 581                                                         : ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
 582 #elif INCLUDE_G1GC
 583                 const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
 584 #else
 585                 const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
 586 #endif
 587                 if (offs == buf_offset) {
 588                   break; // G1 pre barrier previous oop value store.
 589                 }
 590                 if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
 591                   break; // G1 post barrier card address store.
 592                 }
 593               }
 594             }
 595           }
 596 #endif
 597           delayed_worklist->push(n); // Process unsafe access later.
 598           break;
 599         }
 600 #ifdef ASSERT
 601         n->dump(1);
 602         assert(false, "not unsafe or G1 barrier raw StoreP");
 603 #endif
 604       }
 605       break;
 606     }
 607     case Op_AryEq:
 608     case Op_HasNegatives:
 609     case Op_StrComp:
 610     case Op_StrEquals:
 611     case Op_StrIndexOf:
 612     case Op_StrIndexOfChar:
 613     case Op_StrInflatedCopy:
 614     case Op_StrCompressedCopy:
 615     case Op_EncodeISOArray: {
 616       add_local_var(n, PointsToNode::ArgEscape);
 617       delayed_worklist->push(n); // Process it later.
 618       break;
 619     }
 620     case Op_ThreadLocal: {
 621       add_java_object(n, PointsToNode::ArgEscape);
 622       break;
 623     }
 624 #if INCLUDE_SHENANDOAHGC
 625     case Op_ShenandoahEnqueueBarrier:
 626       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 627       break;
 628     case Op_ShenandoahLoadReferenceBarrier:
 629       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
 630 #endif
 631     default:
 632       ; // Do nothing for nodes not related to EA.
 633   }
 634   return;
 635 }
 636 
// ELSE_FAIL(name): used in add_final_edges() switch cases when the case's
// expected pointer pattern did not match. In debug (ASSERT) builds it dumps
// the offending node and asserts with the given message; in product builds
// it silently breaks out of the switch.
 637 #ifdef ASSERT
 638 #define ELSE_FAIL(name)                               \
 639       /* Should not be called for not pointer type. */  \
 640       n->dump(1);                                       \
 641       assert(false, name);                              \
 642       break;
 643 #else
 644 #define ELSE_FAIL(name) \
 645       break;
 646 #endif
 647 
 648 // Add final simple edges to graph.
 649 void ConnectionGraph::add_final_edges(Node *n) {
 650   PointsToNode* n_ptn = ptnode_adr(n->_idx);


 751     case Op_Return: {
 752       if (n->req() > TypeFunc::Parms &&
 753           _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 754         // Treat Return value as LocalVar with GlobalEscape escape state.
 755         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 756                                n->in(TypeFunc::Parms), NULL);
 757         break;
 758       }
 759       ELSE_FAIL("Op_Return");
 760     }
 761     case Op_StoreP:
 762     case Op_StoreN:
 763     case Op_StoreNKlass:
 764     case Op_StorePConditional:
 765     case Op_CompareAndExchangeP:
 766     case Op_CompareAndExchangeN:
 767     case Op_CompareAndSwapP:
 768     case Op_CompareAndSwapN:
 769     case Op_WeakCompareAndSwapP:
 770     case Op_WeakCompareAndSwapN:
 771 #if INCLUDE_SHENANDOAHGC
 772     case Op_ShenandoahCompareAndExchangeP:
 773     case Op_ShenandoahCompareAndExchangeN:
 774     case Op_ShenandoahCompareAndSwapP:
 775     case Op_ShenandoahCompareAndSwapN:
 776     case Op_ShenandoahWeakCompareAndSwapP:
 777     case Op_ShenandoahWeakCompareAndSwapN:
 778 #endif
 779     case Op_GetAndSetP:
 780     case Op_GetAndSetN: {
 781       Node* adr = n->in(MemNode::Address);
 782       const Type *adr_type = _igvn->type(adr);
 783       adr_type = adr_type->make_ptr();
 784 #ifdef ASSERT
 785       if (adr_type == NULL) {
 786         n->dump(1);
 787         assert(adr_type != NULL, "dead node should not be on list");
 788         break;
 789       }
 790 #endif
 791       if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
 792 #if INCLUDE_SHENANDOAHGC
 793           opcode == Op_ShenandoahCompareAndExchangeN || opcode == Op_ShenandoahCompareAndExchangeP ||
 794 #endif
 795           opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
 796         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 797       }
 798       if (   adr_type->isa_oopptr()
 799           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 800               && adr_type == TypeRawPtr::NOTNULL
 801               && adr->in(AddPNode::Address)->is_Proj()
 802               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 803         // Point Address to Value
 804         PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 805         assert(adr_ptn != NULL &&
 806                adr_ptn->as_Field()->is_oop(), "node should be registered");
 807         Node *val = n->in(MemNode::ValueIn);
 808         PointsToNode* ptn = ptnode_adr(val->_idx);
 809         assert(ptn != NULL, "node should be registered");
 810         add_edge(adr_ptn, ptn);
 811         break;
 812       } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 813         // Stored value escapes in unsafe access.
 814         Node *val = n->in(MemNode::ValueIn);


 837     case Op_EncodeISOArray: {
 838       // char[]/byte[] arrays passed to string intrinsic do not escape but
 839       // they are not scalar replaceable. Adjust escape state for them.
 840       // Start from in(2) edge since in(1) is memory edge.
 841       for (uint i = 2; i < n->req(); i++) {
 842         Node* adr = n->in(i);
 843         const Type* at = _igvn->type(adr);
 844         if (!adr->is_top() && at->isa_ptr()) {
 845           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
 846                  at->isa_ptr() != NULL, "expecting a pointer");
 847           if (adr->is_AddP()) {
 848             adr = get_addp_base(adr);
 849           }
 850           PointsToNode* ptn = ptnode_adr(adr->_idx);
 851           assert(ptn != NULL, "node should be registered");
 852           add_edge(n_ptn, ptn);
 853         }
 854       }
 855       break;
 856     }
 857 #if INCLUDE_SHENANDOAHGC
 858     case Op_ShenandoahEnqueueBarrier:
 859       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 860       break;
 861     case Op_ShenandoahLoadReferenceBarrier:
 862       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
 863       break;
 864 #endif
 865     default: {
 866       // This method should be called only for EA specific nodes which may
 867       // miss some edges when they were created.
 868 #ifdef ASSERT
 869       n->dump(1);
 870 #endif
 871       guarantee(false, "unknown node");
 872     }
 873   }
 874   return;
 875 }
 876 
 877 void ConnectionGraph::add_call_node(CallNode* call) {
 878   assert(call->returns_pointer(), "only for call which returns pointer");
 879   uint call_idx = call->_idx;
 880   if (call->is_Allocate()) {
 881     Node* k = call->in(AllocateNode::KlassNode);
 882     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 883     assert(kt != NULL, "TypeKlassPtr  required.");
 884     ciKlass* cik = kt->klass();


2143   BasicType bt = T_INT;
2144   if (offset == Type::OffsetBot) {
2145     // Check only oop fields.
2146     if (!adr_type->isa_aryptr() ||
2147         (adr_type->isa_aryptr()->klass() == NULL) ||
2148          adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2149       // OffsetBot is used to reference array's element. Ignore first AddP.
2150       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2151         bt = T_OBJECT;
2152       }
2153     }
2154   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2155     if (adr_type->isa_instptr()) {
2156       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2157       if (field != NULL) {
2158         bt = field->layout_type();
2159       } else {
2160         // Check for unsafe oop field access
2161         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2162             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2163 #if INCLUDE_SHENANDOAHGC
2164             n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
2165             n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN) ||
2166 #endif
2167             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2168           bt = T_OBJECT;
2169           (*unsafe) = true;
2170         }
2171       }
2172     } else if (adr_type->isa_aryptr()) {
2173       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2174         // Ignore array length load.
2175       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2176         // Ignore first AddP.
2177       } else {
2178         const Type* elemtype = adr_type->isa_aryptr()->elem();
2179         bt = elemtype->array_element_basic_type();
2180       }
2181     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2182       // Allocation initialization, ThreadLocal field access, unsafe access
2183       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2184           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2185           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2186         bt = T_OBJECT;


2406   //     AddP  ( base == top )
2407   //
2408   Node *base = addp->in(AddPNode::Base);
2409   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2410     base = addp->in(AddPNode::Address);
2411     while (base->is_AddP()) {
2412       // Case #6 (unsafe access) may have several chained AddP nodes.
2413       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2414       base = base->in(AddPNode::Address);
2415     }
2416     if (base->Opcode() == Op_CheckCastPP &&
2417         base->bottom_type()->isa_rawptr() &&
2418         _igvn->type(base->in(1))->isa_oopptr()) {
2419       base = base->in(1); // Case #9
2420     } else {
2421       Node* uncast_base = base->uncast();
2422       int opcode = uncast_base->Opcode();
2423       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2424              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2425              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2426              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
2427              uncast_base->Opcode() == Op_ShenandoahLoadReferenceBarrier, "sanity");
2428     }
2429   }
2430   return base;
2431 }
2432 
2433 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2434   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2435   Node* addp2 = addp->raw_out(0);
2436   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2437       addp2->in(AddPNode::Base) == n &&
2438       addp2->in(AddPNode::Address) == addp) {
2439     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2440     //
2441     // Find array's offset to push it on worklist first and
2442   // as a result process an array's element offset first (pushed second)
2443     // to avoid CastPP for the array's offset.
2444     // Otherwise the inserted CastPP (LocalVar) will point to what
2445     // the AddP (Field) points to. Which would be wrong since
2446     // the algorithm expects the CastPP has the same point as
2447   // AddP's base CheckCastPP (LocalVar).


3145           }
3146         }
3147       }
3148     } else if (n->is_AddP()) {
3149       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3150       if (jobj == NULL || jobj == phantom_obj) {
3151 #ifdef ASSERT
3152         ptnode_adr(get_addp_base(n)->_idx)->dump();
3153         ptnode_adr(n->_idx)->dump();
3154         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3155 #endif
3156         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3157         return;
3158       }
3159       Node *base = get_map(jobj->idx());  // CheckCastPP node
3160       if (!split_AddP(n, base)) continue; // wrong type from dead path
3161     } else if (n->is_Phi() ||
3162                n->is_CheckCastPP() ||
3163                n->is_EncodeP() ||
3164                n->is_DecodeN() ||
3165                n->Opcode() == Op_ShenandoahLoadReferenceBarrier ||
3166                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3167       if (visited.test_set(n->_idx)) {
3168         assert(n->is_Phi(), "loops only through Phi's");
3169         continue;  // already processed
3170       }
3171       JavaObjectNode* jobj = unique_java_object(n);
3172       if (jobj == NULL || jobj == phantom_obj) {
3173 #ifdef ASSERT
3174         ptnode_adr(n->_idx)->dump();
3175         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3176 #endif
3177         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3178         return;
3179       } else {
3180         Node *val = get_map(jobj->idx());   // CheckCastPP node
3181         TypeNode *tn = n->as_Type();
3182         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3183         assert(tinst != NULL && tinst->is_known_instance() &&
3184                tinst->instance_id() == jobj->idx() , "instance type expected.");
3185 


3216     // push allocation's users on appropriate worklist
3217     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3218       Node *use = n->fast_out(i);
3219       if(use->is_Mem() && use->in(MemNode::Address) == n) {
3220         // Load/store to instance's field
3221         memnode_worklist.append_if_missing(use);
3222       } else if (use->is_MemBar()) {
3223         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3224           memnode_worklist.append_if_missing(use);
3225         }
3226       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3227         Node* addp2 = find_second_addp(use, n);
3228         if (addp2 != NULL) {
3229           alloc_worklist.append_if_missing(addp2);
3230         }
3231         alloc_worklist.append_if_missing(use);
3232       } else if (use->is_Phi() ||
3233                  use->is_CheckCastPP() ||
3234                  use->is_EncodeNarrowPtr() ||
3235                  use->is_DecodeNarrowPtr() ||
3236                  use->Opcode() == Op_ShenandoahLoadReferenceBarrier ||
3237                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3238         alloc_worklist.append_if_missing(use);
3239 #ifdef ASSERT
3240       } else if (use->is_Mem()) {
3241         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3242       } else if (use->is_MergeMem()) {
3243         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3244       } else if (use->is_SafePoint()) {
3245         // Look for MergeMem nodes for calls which reference unique allocation
3246         // (through CheckCastPP nodes) even for debug info.
3247         Node* m = use->in(TypeFunc::Memory);
3248         if (m->is_MergeMem()) {
3249           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3250         }
3251       } else if (use->Opcode() == Op_EncodeISOArray) {
3252         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3253           // EncodeISOArray overwrites destination array
3254           memnode_worklist.append_if_missing(use);
3255         }
3256       } else {


< prev index next >