src/hotspot/share/opto/macro.cpp

  30 #include "opto/arraycopynode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/compile.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/graphKit.hpp"
  37 #include "opto/locknode.hpp"
  38 #include "opto/loopnode.hpp"
  39 #include "opto/macro.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/narrowptrnode.hpp"
  42 #include "opto/node.hpp"
  43 #include "opto/opaquenode.hpp"
  44 #include "opto/phaseX.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "opto/runtime.hpp"
  47 #include "opto/subnode.hpp"
  48 #include "opto/type.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #if INCLUDE_G1GC
  51 #include "gc/g1/g1ThreadLocalData.hpp"
  52 #endif // INCLUDE_G1GC
  53 
  54 
  55 //
  56 // Replace any references to "oldref" in inputs to "use" with "newref".
  57 // Returns the number of replacements made.
  58 //
  59 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  60   int nreplacements = 0;
  61   uint req = use->req();
  62   for (uint j = 0; j < use->len(); j++) {
  63     Node *uin = use->in(j);
  64     if (uin == oldref) {
  65       if (j < req)
  66         use->set_req(j, newref);
  67       else
  68         use->set_prec(j, newref);
  69       nreplacements++;
  70     } else if (j >= req && uin == NULL) {
  71       break;
  72     }
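
The loop above has one subtlety: slots below req() are required inputs, slots from req() up to len() are precedence edges, and the precedence region is NULL-terminated, which is why the scan can stop at the first NULL it finds there. A minimal standalone analogue of the same pattern (a toy edge array, not the HotSpot Node API):

#include <cstddef>
#include <vector>

// Toy stand-in for a node's edge array: [0, req) are required inputs,
// [req, edges.size()) are precedence edges, NULL-terminated.
struct ToyNode {
  size_t req;
  std::vector<ToyNode*> edges;
};

int toy_replace_input(ToyNode* use, ToyNode* oldref, ToyNode* newref) {
  int nreplacements = 0;
  for (size_t j = 0; j < use->edges.size(); j++) {
    ToyNode* uin = use->edges[j];
    if (uin == oldref) {
      use->edges[j] = newref;  // covers both the set_req and set_prec cases
      nreplacements++;
    } else if (j >= use->req && uin == nullptr) {
      break;                   // precedence edges are NULL-terminated
    }
  }
  return nreplacements;
}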


 417   for (uint j = 1; j < length; j++) {
 418     Node *in = mem->in(j);
 419     if (in == NULL || in->is_top()) {
 420       values.at_put(j, in);
 421     } else  {
 422       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 423       if (val == start_mem || val == alloc_mem) {
 424         // hit a sentinel, return appropriate 0 value
 425         values.at_put(j, _igvn.zerocon(ft));
 426         continue;
 427       }
 428       if (val->is_Initialize()) {
 429         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 430       }
 431       if (val == NULL) {
 432         return NULL;  // can't find a value on this path
 433       }
 434       if (val == mem) {
 435         values.at_put(j, mem);
 436       } else if (val->is_Store()) {
 437         values.at_put(j, val->in(MemNode::ValueIn));
 438       } else if (val->is_Proj() && val->in(0) == alloc) {
 439         values.at_put(j, _igvn.zerocon(ft));
 440       } else if (val->is_Phi()) {
 441         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 442         if (val == NULL) {
 443           return NULL;
 444         }
 445         values.at_put(j, val);
 446       } else if (val->Opcode() == Op_SCMemProj) {
 447         assert(val->in(0)->is_LoadStore() ||
 448                val->in(0)->Opcode() == Op_EncodeISOArray ||
 449                val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
 450         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 451         return NULL;
 452       } else if (val->is_ArrayCopy()) {
 453         Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
 454         if (res == NULL) {
 455           return NULL;
 456         }
 457         values.at_put(j, res);
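
This loop is the heart of value reconstruction for scalar replacement: for every predecessor of a memory Phi it finds the value the field holds on that path (zero at an allocation sentinel, the stored value at a Store, recursion at a nested Phi) and collects the per-path results into values for a new data Phi. A simplified, self-contained analogue, assuming a toy memory-chain representation rather than C2's IR:

#include <vector>

// Toy memory state: the allocation sentinel, a store of `value` at
// `offset` chained onto `prior`, or a merge of several predecessors.
struct ToyMem {
  enum Kind { kSentinel, kStore, kMerge } kind;
  int offset = 0, value = 0;
  ToyMem* prior = nullptr;
  std::vector<ToyMem*> inputs;
};

// Resolve the field value at `offset` on one path; returns false when no
// single value can be determined (the real code then gives up and keeps
// the allocation). A merge corresponds to a Phi: the real code builds a
// new Phi from the per-path values rather than requiring them to agree.
bool value_at(const ToyMem* m, int offset, int* out) {
  switch (m->kind) {
    case ToyMem::kSentinel:
      *out = 0;                                  // like _igvn.zerocon(ft)
      return true;
    case ToyMem::kStore:
      if (m->offset == offset) { *out = m->value; return true; }
      return value_at(m->prior, offset, out);
    case ToyMem::kMerge: {
      int first;
      if (!value_at(m->inputs[0], offset, &first)) return false;
      for (size_t j = 1; j < m->inputs.size(); j++) {
        int v;
        if (!value_at(m->inputs[j], offset, &v) || v != first) return false;
      }
      *out = first;
      return true;
    }
  }
  return false;
}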


 529           unique_input = top;
 530           break;
 531         }
 532       }
 533       if (unique_input != NULL && unique_input != top) {
 534         mem = unique_input;
 535       } else {
 536         done = true;
 537       }
 538     } else if (mem->is_ArrayCopy()) {
 539       done = true;
 540     } else {
 541       assert(false, "unexpected node");
 542     }
 543   }
 544   if (mem != NULL) {
 545     if (mem == start_mem || mem == alloc_mem) {
 546       // hit a sentinel, return appropriate 0 value
 547       return _igvn.zerocon(ft);
 548     } else if (mem->is_Store()) {
 549       return mem->in(MemNode::ValueIn);
 550     } else if (mem->is_Phi()) {
 551       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 552       Node_Stack value_phis(a, 8);
 553       Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 554       if (phi != NULL) {
 555         return phi;
 556       } else {
 557         // Kill all new Phis
 558         while (value_phis.is_nonempty()) {
 559           Node* n = value_phis.node();
 560           _igvn.replace_node(n, C->top());
 561           value_phis.pop();
 562         }
 563       }
 564     } else if (mem->is_ArrayCopy()) {
 565       Node* ctl = mem->in(0);
 566       Node* m = mem->in(TypeFunc::Memory);
 567       if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 568         // pin the loads in the uncommon trap path
 569         ctl = sfpt_ctl;


 606   }
 607 
 608   if (can_eliminate && res != NULL) {
 609     for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
 610                                j < jmax && can_eliminate; j++) {
 611       Node* use = res->fast_out(j);
 612 
 613       if (use->is_AddP()) {
 614         const TypePtr* addp_type = _igvn.type(use)->is_ptr();
 615         int offset = addp_type->offset();
 616 
 617         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
 618           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 619           can_eliminate = false;
 620           break;
 621         }
 622         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 623                                    k < kmax && can_eliminate; k++) {
 624           Node* n = use->fast_out(k);
 625           if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
 626               !(n->is_ArrayCopy() &&
 627                 n->as_ArrayCopy()->is_clonebasic() &&
 628                 n->in(ArrayCopyNode::Dest) == use)) {
 629             DEBUG_ONLY(disq_node = n;)
 630             if (n->is_Load() || n->is_LoadStore()) {
 631               NOT_PRODUCT(fail_eliminate = "Field load";)
 632             } else {
 633             NOT_PRODUCT(fail_eliminate = "Not store field reference";)
 634             }
 635             can_eliminate = false;
 636           }
 637         }
 638       } else if (use->is_ArrayCopy() &&
 639                  (use->as_ArrayCopy()->is_arraycopy_validated() ||
 640                   use->as_ArrayCopy()->is_copyof_validated() ||
 641                   use->as_ArrayCopy()->is_copyofrange_validated()) &&
 642                  use->in(ArrayCopyNode::Dest) == res) {
 643         // ok to eliminate
 644       } else if (use->is_SafePoint()) {
 645         SafePointNode* sfpt = use->as_SafePoint();
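
Summarized, the double loop above classifies every use of the allocation's address: stores into the object, CastP2X (GC write-barrier address math), validated array copies targeting the object, and safepoint debug uses are tolerated, while field loads, LoadStore accesses, or anything unrecognized disqualify elimination. A compact sketch of that decision, with a hypothetical UseKind classification standing in for the node inspection:

#include <vector>

// Hypothetical classification of the uses found by the scan above.
enum class UseKind { FieldStore, CastP2X, CloneDest, SafePointDebug,
                     FieldLoad, LoadStore, Unknown };

// An allocation is eliminable only if no use needs the object to really
// exist in memory: loads and unknown uses do, the others do not.
bool can_eliminate_allocation(const std::vector<UseKind>& uses) {
  for (UseKind u : uses) {
    if (u == UseKind::FieldLoad || u == UseKind::LoadStore ||
        u == UseKind::Unknown) {
      return false;              // e.g. the "Field load" failure above
    }
  }
  return true;
}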


 917             }
 918 #endif
 919             _igvn.replace_node(n, n->in(MemNode::Memory));
 920           } else if (n->is_ArrayCopy()) {
 921             // Disconnect ArrayCopy node
 922             ArrayCopyNode* ac = n->as_ArrayCopy();
 923             assert(ac->is_clonebasic(), "unexpected array copy kind");
 924             Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
 925             disconnect_projections(ac, _igvn);
 926             assert(alloc->in(0)->is_Proj() && alloc->in(0)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
 927             Node* membar_before = alloc->in(0)->in(0);
 928             disconnect_projections(membar_before->as_MemBar(), _igvn);
 929             if (membar_after->is_MemBar()) {
 930               disconnect_projections(membar_after->as_MemBar(), _igvn);
 931             }
 932           } else {
 933             eliminate_gc_barrier(n);
 934           }
 935           k -= (oc2 - use->outcnt());
 936         }
 937       } else if (use->is_ArrayCopy()) {
 938         // Disconnect ArrayCopy node
 939         ArrayCopyNode* ac = use->as_ArrayCopy();
 940         assert(ac->is_arraycopy_validated() ||
 941                ac->is_copyof_validated() ||
 942                ac->is_copyofrange_validated(), "unsupported");
 943         CallProjections callprojs;
 944         ac->extract_projections(&callprojs, true);
 945 
 946         _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
 947         _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
 948         _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));
 949 
 950         // Set control to top. IGVN will remove the remaining projections
 951         ac->set_req(0, top());
 952         ac->replace_edge(res, top());
 953 
 954         // Disconnect src right away: it can help find new
 955         // opportunities for allocation elimination
 956         Node* src = ac->in(ArrayCopyNode::Src);


1339       contended_phi_rawmem = mem;
1340     } else {
1341       contended_region = new RegionNode(3);
1342       contended_phi_rawmem = new PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1343       // Now handle the passing-too-big test.  We fall into the contended
1344       // loop-back merge point.
1345       contended_region    ->init_req(fall_in_path, toobig_false);
1346       contended_phi_rawmem->init_req(fall_in_path, mem);
1347       transform_later(contended_region);
1348       transform_later(contended_phi_rawmem);
1349     }
1350 
1351     // Load(-locked) the heap top.
1352     // See note above concerning the control input when using a TLAB
1353     Node *old_eden_top = UseTLAB
1354       ? new LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
1355       : new LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
1356 
1357     transform_later(old_eden_top);
1358     // Add to heap top to get a new heap top
1359     Node *new_eden_top = new AddPNode(top(), old_eden_top, size_in_bytes);
1360     transform_later(new_eden_top);
1361     // Check for needing a GC; compare against heap end
1362     Node *needgc_cmp = new CmpPNode(new_eden_top, eden_end);
1363     transform_later(needgc_cmp);
1364     Node *needgc_bol = new BoolNode(needgc_cmp, BoolTest::ge);
1365     transform_later(needgc_bol);
1366     IfNode *needgc_iff = new IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
1367     transform_later(needgc_iff);
1368 
1369     // Plug the failing-heap-space-need-gc test into the slow-path region
1370     Node *needgc_true = new IfTrueNode(needgc_iff);
1371     transform_later(needgc_true);
1372     if (initial_slow_test) {
1373       slow_region->init_req(need_gc_path, needgc_true);
1374       // This completes all paths into the slow merge point
1375       transform_later(slow_region);
1376     } else {                      // No initial slow path needed!
1377       // Just fall from the need-GC path straight into the VM call.
1378       slow_region = needgc_true;
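
Stripped of the IR plumbing, lines 1351-1378 emit the classic bump-pointer fast path: load the current heap (or TLAB) top, add the object size, compare against the end, and branch to the slow path on overflow, with the "contended" region providing a retry loop when allocating without a TLAB. A standalone sketch of the same algorithm, assuming a single eden region guarded by an atomic top pointer:

#include <atomic>
#include <cstddef>
#include <cstdint>

static std::atomic<uintptr_t> eden_top;       // current allocation pointer
static uintptr_t eden_end = 0;                // end of the eden region

// Returns the new object's address, or 0 to signal the slow path
// (GC / runtime call), mirroring the needgc_bol/needgc_iff test above.
uintptr_t allocate(size_t size_in_bytes) {
  uintptr_t old_top = eden_top.load(std::memory_order_relaxed);
  for (;;) {
    uintptr_t new_top = old_top + size_in_bytes;       // AddPNode
    if (new_top >= eden_end) {                         // CmpP, BoolTest::ge
      return 0;                                        // need GC: slow path
    }
    // StorePConditional analogue: retry from the contended merge point
    // if another thread moved the top underneath us.
    if (eden_top.compare_exchange_weak(old_top, new_top,
                                       std::memory_order_relaxed)) {
      return old_top;                                  // object starts here
    }
  }
}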


1429 
1430       // Bump total allocated bytes for this thread
1431       Node* thread = new ThreadLocalNode();
1432       transform_later(thread);
1433       Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread,
1434                                              in_bytes(JavaThread::allocated_bytes_offset()));
1435       Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1436                                     0, TypeLong::LONG, T_LONG);
1437 #ifdef _LP64
1438       Node* alloc_size = size_in_bytes;
1439 #else
1440       Node* alloc_size = new ConvI2LNode(size_in_bytes);
1441       transform_later(alloc_size);
1442 #endif
1443       Node* new_alloc_bytes = new AddLNode(alloc_bytes, alloc_size);
1444       transform_later(new_alloc_bytes);
1445       fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1446                                    0, new_alloc_bytes, T_LONG);
1447     }
1448 
1449     InitializeNode* init = alloc->initialization();
1450     fast_oop_rawmem = initialize_object(alloc,
1451                                         fast_oop_ctrl, fast_oop_rawmem, fast_oop,
1452                                         klass_node, length, size_in_bytes);
1453 
 1454     // If initialization is performed by an array copy, any required
 1455     // MemBarStoreStore was already added. If the object does not
 1456     // escape, no MemBarStoreStore is needed. If the object does not
 1457     // escape in its initializer and a memory barrier (MemBarStoreStore
 1458     // or stronger) was already added at the exit of the initializer,
 1459     // there is also no need for one. Otherwise we need a MemBarStoreStore
 1460     // so that stores that initialize this object can't be reordered
 1461     // with a subsequent store that makes this object accessible by
 1462     // other threads.
 1463     // Other threads include Java threads and JVM internal threads
 1464     // (for example concurrent GC threads). The current concurrent GC
 1465     // implementations, CMS and G1, will not scan newly created objects,
 1466     // so it's safe to skip the storestore barrier when the allocation
 1467     // does not escape.
1468     if (!alloc->does_not_escape_thread() &&
1469         !alloc->is_allocation_MemBar_redundant() &&
1470         (init == NULL || !init->is_complete_with_arraycopy())) {
1471       if (init == NULL || init->req() < InitializeNode::RawStores) {
1472         // No InitializeNode or no stores captured by zeroing
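
The comment block above (lines 1454-1467) is the safe-publication argument: the stores that initialize the object must not be reordered after the store that makes it reachable from other threads, which is exactly what a MemBarStoreStore enforces when the object may escape. In portable C++ terms the same discipline is release publication; a sketch of the pattern the barrier keeps intact:

#include <atomic>

struct Box { int field; };

static std::atomic<Box*> shared{nullptr};

void publisher() {
  Box* b = new Box;
  b->field = 42;                                 // initializing store
  // Release ordering plays the role of MemBarStoreStore here: the
  // initializing store above cannot sink past this publishing store.
  shared.store(b, std::memory_order_release);
}

void consumer() {
  Box* b = shared.load(std::memory_order_acquire);
  if (b != nullptr) {
    int v = b->field;   // with release/acquire, guaranteed to read 42
    (void)v;
  }
}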


1719   // For now only enable fast locking for non-array types
1720   if (UseBiasedLocking && (length == NULL)) {
1721     mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
1722   } else {
1723     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
1724   }
1725   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
1726 
1727   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1728   int header_size = alloc->minimum_header_size();  // conservatively small
1729 
1730   // Array length
1731   if (length != NULL) {         // Arrays need length field
1732     rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1733     // conservatively small header size:
1734     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1735     ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
1736     if (k->is_array_klass())    // we know the exact header size in most cases:
1737       header_size = Klass::layout_helper_header_size(k->layout_helper());
1738   }
1739 
1740   // Clear the object body, if necessary.
1741   if (init == NULL) {
1742     // The init has somehow disappeared; be cautious and clear everything.
1743     //
1744     // This can happen if a node is allocated but an uncommon trap occurs
1745     // immediately.  In this case, the Initialize gets associated with the
1746     // trap, and may be placed in a different (outer) loop, if the Allocate
1747     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1748     // there can be two Allocates to one Initialize.  The answer in all these
1749     // edge cases is safety first.  It is always safe to clear immediately
1750     // within an Allocate, and then (maybe or maybe not) clear some more later.
1751     if (!(UseTLAB && ZeroTLAB)) {
1752       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1753                                             header_size, size_in_bytes,
1754                                             &_igvn);
1755     }
1756   } else {
1757     if (!init->is_complete()) {
1758       // Try to win by zeroing only what the init does not store.
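
The stores at lines 1725-1732 lay down the object header before the body is cleared: the mark word, the klass reference, and, for arrays, the length field, after which header_size tells the zeroing code where the body begins. A conceptual (not ABI-accurate) picture of that layout:

#include <cstdint>

// Conceptual header written by the make_store calls above; real offsets
// come from oopDesc/arrayOopDesc and depend on compressed oops.
struct ConceptualArrayObject {
  uintptr_t mark;     // prototype mark word (biased or neutral)
  void*     klass;    // klass pointer (the T_METADATA store above)
  int32_t   length;   // arrays only (the T_INT store at length_offset)
  // ...body follows at header_size; cleared by ClearArrayNode::clear_memory
  //    unless the TLAB is pre-zeroed (UseTLAB && ZeroTLAB) or the
  //    InitializeNode covers every field.
};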




  30 #include "opto/arraycopynode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/compile.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/graphKit.hpp"
  37 #include "opto/locknode.hpp"
  38 #include "opto/loopnode.hpp"
  39 #include "opto/macro.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/narrowptrnode.hpp"
  42 #include "opto/node.hpp"
  43 #include "opto/opaquenode.hpp"
  44 #include "opto/phaseX.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "opto/runtime.hpp"
  47 #include "opto/subnode.hpp"
  48 #include "opto/type.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "utilities/macros.hpp"
  51 #if INCLUDE_G1GC
  52 #include "gc/g1/g1ThreadLocalData.hpp"
  53 #endif // INCLUDE_G1GC
  54 #if INCLUDE_SHENANDOAHGC
  55 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  56 #endif
  57 
  58 
  59 //
  60 // Replace any references to "oldref" in inputs to "use" with "newref".
  61 // Returns the number of replacements made.
  62 //
  63 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  64   int nreplacements = 0;
  65   uint req = use->req();
  66   for (uint j = 0; j < use->len(); j++) {
  67     Node *uin = use->in(j);
  68     if (uin == oldref) {
  69       if (j < req)
  70         use->set_req(j, newref);
  71       else
  72         use->set_prec(j, newref);
  73       nreplacements++;
  74     } else if (j >= req && uin == NULL) {
  75       break;
  76     }


 421   for (uint j = 1; j < length; j++) {
 422     Node *in = mem->in(j);
 423     if (in == NULL || in->is_top()) {
 424       values.at_put(j, in);
 425     } else  {
 426       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 427       if (val == start_mem || val == alloc_mem) {
 428         // hit a sentinel, return appropriate 0 value
 429         values.at_put(j, _igvn.zerocon(ft));
 430         continue;
 431       }
 432       if (val->is_Initialize()) {
 433         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 434       }
 435       if (val == NULL) {
 436         return NULL;  // can't find a value on this path
 437       }
 438       if (val == mem) {
 439         values.at_put(j, mem);
 440       } else if (val->is_Store()) {
 441         Node* n = val->in(MemNode::ValueIn);
 442 #if INCLUDE_SHENANDOAHGC
 443         n = ShenandoahBarrierNode::skip_through_barrier(n);
 444 #endif
 445         values.at_put(j, n);
 446       } else if (val->is_Proj() && val->in(0) == alloc) {
 447         values.at_put(j, _igvn.zerocon(ft));
 448       } else if (val->is_Phi()) {
 449         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 450         if (val == NULL) {
 451           return NULL;
 452         }
 453         values.at_put(j, val);
 454       } else if (val->Opcode() == Op_SCMemProj) {
 455         assert(val->in(0)->is_LoadStore() ||
 456                val->in(0)->Opcode() == Op_EncodeISOArray ||
 457                val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
 458         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 459         return NULL;
 460       } else if (val->is_ArrayCopy()) {
 461         Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
 462         if (res == NULL) {
 463           return NULL;
 464         }
 465         values.at_put(j, res);
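
The Shenandoah-specific change in this hunk (new lines 441-445, with a matching one in the Store case at new lines 557-561 below) is that a value reaching a Store may be wrapped in a Shenandoah barrier node; scalar replacement wants the underlying value, so it unwraps it with skip_through_barrier before recording it. A generic sketch of that unwrapping, using a hypothetical wrapper-node shape rather than the real node classes:

// Hypothetical wrapper shape: a barrier node forwards its payload input.
struct ToyVal {
  bool is_barrier;
  ToyVal* payload;   // meaningful only when is_barrier is true
};

// Analogue of ShenandoahBarrierNode::skip_through_barrier(n): peel any
// chain of barrier wrappers so the caller sees the raw stored value.
ToyVal* skip_through_barrier(ToyVal* n) {
  while (n != nullptr && n->is_barrier) {
    n = n->payload;
  }
  return n;
}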


 537           unique_input = top;
 538           break;
 539         }
 540       }
 541       if (unique_input != NULL && unique_input != top) {
 542         mem = unique_input;
 543       } else {
 544         done = true;
 545       }
 546     } else if (mem->is_ArrayCopy()) {
 547       done = true;
 548     } else {
 549       assert(false, "unexpected node");
 550     }
 551   }
 552   if (mem != NULL) {
 553     if (mem == start_mem || mem == alloc_mem) {
 554       // hit a sentinel, return appropriate 0 value
 555       return _igvn.zerocon(ft);
 556     } else if (mem->is_Store()) {
 557       Node* n = mem->in(MemNode::ValueIn);
 558 #if INCLUDE_SHENANDOAHGC
 559       n = ShenandoahBarrierNode::skip_through_barrier(n);
 560 #endif
 561       return n;
 562     } else if (mem->is_Phi()) {
 563       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 564       Node_Stack value_phis(a, 8);
 565       Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 566       if (phi != NULL) {
 567         return phi;
 568       } else {
 569         // Kill all new Phis
 570         while (value_phis.is_nonempty()) {
 571           Node* n = value_phis.node();
 572           _igvn.replace_node(n, C->top());
 573           value_phis.pop();
 574         }
 575       }
 576     } else if (mem->is_ArrayCopy()) {
 577       Node* ctl = mem->in(0);
 578       Node* m = mem->in(TypeFunc::Memory);
 579       if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 580         // pin the loads in the uncommon trap path
 581         ctl = sfpt_ctl;


 618   }
 619 
 620   if (can_eliminate && res != NULL) {
 621     for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
 622                                j < jmax && can_eliminate; j++) {
 623       Node* use = res->fast_out(j);
 624 
 625       if (use->is_AddP()) {
 626         const TypePtr* addp_type = _igvn.type(use)->is_ptr();
 627         int offset = addp_type->offset();
 628 
 629         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
 630           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 631           can_eliminate = false;
 632           break;
 633         }
 634         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 635                                    k < kmax && can_eliminate; k++) {
 636           Node* n = use->fast_out(k);
 637           if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
 638               SHENANDOAHGC_ONLY((!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n)) &&)
 639               !(n->is_ArrayCopy() &&
 640                 n->as_ArrayCopy()->is_clonebasic() &&
 641                 n->in(ArrayCopyNode::Dest) == use)) {
 642             DEBUG_ONLY(disq_node = n;)
 643             if (n->is_Load() || n->is_LoadStore()) {
 644               NOT_PRODUCT(fail_eliminate = "Field load";)
 645             } else {
 646               NOT_PRODUCT(fail_eliminate = "Not store field reference";)
 647             }
 648             can_eliminate = false;
 649           }
 650         }
 651       } else if (use->is_ArrayCopy() &&
 652                  (use->as_ArrayCopy()->is_arraycopy_validated() ||
 653                   use->as_ArrayCopy()->is_copyof_validated() ||
 654                   use->as_ArrayCopy()->is_copyofrange_validated()) &&
 655                  use->in(ArrayCopyNode::Dest) == res) {
 656         // ok to eliminate
 657       } else if (use->is_SafePoint()) {
 658         SafePointNode* sfpt = use->as_SafePoint();


 930             }
 931 #endif
 932             _igvn.replace_node(n, n->in(MemNode::Memory));
 933           } else if (n->is_ArrayCopy()) {
 934             // Disconnect ArrayCopy node
 935             ArrayCopyNode* ac = n->as_ArrayCopy();
 936             assert(ac->is_clonebasic(), "unexpected array copy kind");
 937             Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
 938             disconnect_projections(ac, _igvn);
 939             assert(alloc->in(0)->is_Proj() && alloc->in(0)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
 940             Node* membar_before = alloc->in(0)->in(0);
 941             disconnect_projections(membar_before->as_MemBar(), _igvn);
 942             if (membar_after->is_MemBar()) {
 943               disconnect_projections(membar_after->as_MemBar(), _igvn);
 944             }
 945           } else {
 946             eliminate_gc_barrier(n);
 947           }
 948           k -= (oc2 - use->outcnt());
 949         }
 950         if (UseShenandoahGC) {
 951           _igvn.remove_dead_node(use);
 952         }
 953       } else if (use->is_ArrayCopy()) {
 954         // Disconnect ArrayCopy node
 955         ArrayCopyNode* ac = use->as_ArrayCopy();
 956         assert(ac->is_arraycopy_validated() ||
 957                ac->is_copyof_validated() ||
 958                ac->is_copyofrange_validated(), "unsupported");
 959         CallProjections callprojs;
 960         ac->extract_projections(&callprojs, true);
 961 
 962         _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
 963         _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
 964         _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));
 965 
 966         // Set control to top. IGVN will remove the remaining projections
 967         ac->set_req(0, top());
 968         ac->replace_edge(res, top());
 969 
 970         // Disconnect src right away: it can help find new
 971         // opportunities for allocation elimination
 972         Node* src = ac->in(ArrayCopyNode::Src);


1355       contended_phi_rawmem = mem;
1356     } else {
1357       contended_region = new RegionNode(3);
1358       contended_phi_rawmem = new PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1359       // Now handle the passing-too-big test.  We fall into the contended
1360       // loop-back merge point.
1361       contended_region    ->init_req(fall_in_path, toobig_false);
1362       contended_phi_rawmem->init_req(fall_in_path, mem);
1363       transform_later(contended_region);
1364       transform_later(contended_phi_rawmem);
1365     }
1366 
1367     // Load(-locked) the heap top.
1368     // See note above concerning the control input when using a TLAB
1369     Node *old_eden_top = UseTLAB
1370       ? new LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
1371       : new LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
1372 
1373     transform_later(old_eden_top);
1374     // Add to heap top to get a new heap top
1375 
1376     Node* init_size_in_bytes = size_in_bytes;
1377 
1378 #if INCLUDE_SHENANDOAHGC
1379     if (UseShenandoahGC) {
 1380       // Allocate several words more for the Shenandoah Brooks pointer.
1381       size_in_bytes = new AddXNode(size_in_bytes, _igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
1382       transform_later(size_in_bytes);
1383     }
1384 #endif
1385 
1386     Node *new_eden_top = new AddPNode(top(), old_eden_top, size_in_bytes);
1387     transform_later(new_eden_top);
1388     // Check for needing a GC; compare against heap end
1389     Node *needgc_cmp = new CmpPNode(new_eden_top, eden_end);
1390     transform_later(needgc_cmp);
1391     Node *needgc_bol = new BoolNode(needgc_cmp, BoolTest::ge);
1392     transform_later(needgc_bol);
1393     IfNode *needgc_iff = new IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
1394     transform_later(needgc_iff);
1395 
1396     // Plug the failing-heap-space-need-gc test into the slow-path region
1397     Node *needgc_true = new IfTrueNode(needgc_iff);
1398     transform_later(needgc_true);
1399     if (initial_slow_test) {
1400       slow_region->init_req(need_gc_path, needgc_true);
1401       // This completes all paths into the slow merge point
1402       transform_later(slow_region);
1403     } else {                      // No initial slow path needed!
1404       // Just fall from the need-GC path straight into the VM call.
1405       slow_region = needgc_true;
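
New lines 1376-1384 are the Shenandoah twist on the fast path: the heap bump is made ShenandoahBrooksPointer::byte_size() larger than the object, while init_size_in_bytes preserves the unpadded size for initialize_object further down; the extra space in front holds the forwarding (Brooks) pointer, and new lines 1477-1481 later advance fast_oop past it. A sketch of the resulting arithmetic, assuming a one-word forwarding slot:

#include <cstddef>
#include <cstdint>

const size_t kBrooksPtrBytes = sizeof(uintptr_t);  // assumed one word

// Mirrors the AddXNode/AddPNode pair in the Shenandoah path above:
// bump the heap by the padded size, start the object after the slot.
uintptr_t allocate_with_brooks_ptr(uintptr_t raw_alloc, size_t obj_size,
                                   size_t* heap_bump_out) {
  *heap_bump_out = obj_size + kBrooksPtrBytes;  // padded size bumped in eden
  return raw_alloc + kBrooksPtrBytes;           // object starts after the slot
}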


1456 
1457       // Bump total allocated bytes for this thread
1458       Node* thread = new ThreadLocalNode();
1459       transform_later(thread);
1460       Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread,
1461                                              in_bytes(JavaThread::allocated_bytes_offset()));
1462       Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1463                                     0, TypeLong::LONG, T_LONG);
1464 #ifdef _LP64
1465       Node* alloc_size = size_in_bytes;
1466 #else
1467       Node* alloc_size = new ConvI2LNode(size_in_bytes);
1468       transform_later(alloc_size);
1469 #endif
1470       Node* new_alloc_bytes = new AddLNode(alloc_bytes, alloc_size);
1471       transform_later(new_alloc_bytes);
1472       fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1473                                    0, new_alloc_bytes, T_LONG);
1474     }
1475 
1476 #if INCLUDE_SHENANDOAHGC
1477     if (UseShenandoahGC) {
 1478       // Bump up the object for the Shenandoah Brooks pointer.
1479       fast_oop = new AddPNode(top(), fast_oop, _igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
1480       transform_later(fast_oop);
1481     }
1482 #endif
1483 
1484     InitializeNode* init = alloc->initialization();
1485     fast_oop_rawmem = initialize_object(alloc,
1486                                         fast_oop_ctrl, fast_oop_rawmem, fast_oop,
1487                                         klass_node, length, init_size_in_bytes);
1488 
 1489     // If initialization is performed by an array copy, any required
 1490     // MemBarStoreStore was already added. If the object does not
 1491     // escape, no MemBarStoreStore is needed. If the object does not
 1492     // escape in its initializer and a memory barrier (MemBarStoreStore
 1493     // or stronger) was already added at the exit of the initializer,
 1494     // there is also no need for one. Otherwise we need a MemBarStoreStore
 1495     // so that stores that initialize this object can't be reordered
 1496     // with a subsequent store that makes this object accessible by
 1497     // other threads.
 1498     // Other threads include Java threads and JVM internal threads
 1499     // (for example concurrent GC threads). The current concurrent GC
 1500     // implementations, CMS and G1, will not scan newly created objects,
 1501     // so it's safe to skip the storestore barrier when the allocation
 1502     // does not escape.
1503     if (!alloc->does_not_escape_thread() &&
1504         !alloc->is_allocation_MemBar_redundant() &&
1505         (init == NULL || !init->is_complete_with_arraycopy())) {
1506       if (init == NULL || init->req() < InitializeNode::RawStores) {
1507         // No InitializeNode or no stores captured by zeroing


1754   // For now only enable fast locking for non-array types
1755   if (UseBiasedLocking && (length == NULL)) {
1756     mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
1757   } else {
1758     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
1759   }
1760   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
1761 
1762   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1763   int header_size = alloc->minimum_header_size();  // conservatively small
1764 
1765   // Array length
1766   if (length != NULL) {         // Arrays need length field
1767     rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1768     // conservatively small header size:
1769     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1770     ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
1771     if (k->is_array_klass())    // we know the exact header size in most cases:
1772       header_size = Klass::layout_helper_header_size(k->layout_helper());
1773   }
1774 
1775 #if INCLUDE_SHENANDOAHGC
1776   if (UseShenandoahGC) {
 1777     // Initialize the Shenandoah Brooks pointer to point to the object itself.
1778     rawmem = make_store(control, rawmem, object, ShenandoahBrooksPointer::byte_offset(), object, T_OBJECT);
1779   }
1780 #endif
1781 
1782   // Clear the object body, if necessary.
1783   if (init == NULL) {
1784     // The init has somehow disappeared; be cautious and clear everything.
1785     //
1786     // This can happen if a node is allocated but an uncommon trap occurs
1787     // immediately.  In this case, the Initialize gets associated with the
1788     // trap, and may be placed in a different (outer) loop, if the Allocate
1789     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1790     // there can be two Allocates to one Initialize.  The answer in all these
1791     // edge cases is safety first.  It is always safe to clear immediately
1792     // within an Allocate, and then (maybe or maybe not) clear some more later.
1793     if (!(UseTLAB && ZeroTLAB)) {
1794       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1795                                             header_size, size_in_bytes,
1796                                             &_igvn);
1797     }
1798   } else {
1799     if (!init->is_complete()) {
1800       // Try to win by zeroing only what the init does not store.
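
New line 1778 completes the Shenandoah layout: the Brooks slot just in front of the object is initialized to point at the object itself, so any access that indirects through the forwarding pointer reaches the current copy; during evacuation the collector redirects it to the new location. A minimal sketch of the self-referential slot and the forwarding read, under the same one-word-slot assumption as above:

#include <cstdint>

// The word immediately before the object is the forwarding pointer.
inline uintptr_t* brooks_slot(uintptr_t obj) {
  return reinterpret_cast<uintptr_t*>(obj) - 1;
}

// Analogue of the make_store above: fresh objects forward to themselves.
inline void init_brooks_ptr(uintptr_t obj) { *brooks_slot(obj) = obj; }

// A (simplified) Shenandoah read barrier: follow the forwarding pointer
// to reach the object's current copy.
inline uintptr_t resolve(uintptr_t obj) { return *brooks_slot(obj); }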

