src/share/vm/opto/macro.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "libadt/vectset.hpp"
  28 #include "opto/addnode.hpp"
  29 #include "opto/callnode.hpp"
  30 #include "opto/cfgnode.hpp"
  31 #include "opto/compile.hpp"
  32 #include "opto/connode.hpp"
  33 #include "opto/locknode.hpp"
  34 #include "opto/loopnode.hpp"
  35 #include "opto/macro.hpp"
  36 #include "opto/memnode.hpp"
  37 #include "opto/node.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "opto/type.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 
  45 
  46 //
  47 // Replace any references to "oldref" in inputs to "use" with "newref".
  48 // Returns the number of replacements made.
  49 //
  50 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  51   int nreplacements = 0;
  52   uint req = use->req();
  53   for (uint j = 0; j < use->len(); j++) {
  54     Node *uin = use->in(j);
  55     if (uin == oldref) {
  56       if (j < req)
  57         use->set_req(j, newref);
  58       else
  59         use->set_prec(j, newref);
  60       nreplacements++;
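
A hedged usage sketch (illustrative variable names, not from this file): during
macro expansion a helper like replace_input() is typically applied to each user
of an old projection, with the usual fast-out iterator compensation after edits:

    // Rewire every user of old_proj to new_proj (sketch, not HotSpot code).
    for (DUIterator_Fast imax, i = old_proj->fast_outs(imax); i < imax; i++) {
      Node* use = old_proj->fast_out(i);
      int nb = replace_input(use, old_proj, new_proj);
      if (nb > 0) {
        --i; imax -= nb;  // compensate for uses removed from old_proj's out list
      }
    }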


 427   for (uint j = 1; j < length; j++) {
 428     Node *in = mem->in(j);
 429     if (in == NULL || in->is_top()) {
 430       values.at_put(j, in);
 431     } else  {
 432       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 433       if (val == start_mem || val == alloc_mem) {
 434         // hit a sentinel, return appropriate 0 value
 435         values.at_put(j, _igvn.zerocon(ft));
 436         continue;
 437       }
 438       if (val->is_Initialize()) {
 439         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 440       }
 441       if (val == NULL) {
 442         return NULL;  // can't find a value on this path
 443       }
 444       if (val == mem) {
 445         values.at_put(j, mem);
 446       } else if (val->is_Store()) {
 447         values.at_put(j, val->in(MemNode::ValueIn));
 448       } else if (val->is_Proj() && val->in(0) == alloc) {
 449         values.at_put(j, _igvn.zerocon(ft));
 450       } else if (val->is_Phi()) {
 451         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 452         if (val == NULL) {
 453           return NULL;
 454         }
 455         values.at_put(j, val);
 456       } else if (val->Opcode() == Op_SCMemProj) {
 457         assert(val->in(0)->is_LoadStore() || val->in(0)->Opcode() == Op_EncodeISOArray, "sanity");
 458         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 459         return NULL;
 460       } else {
 461 #ifdef ASSERT
 462         val->dump();
 463         assert(false, "unknown node on this path");
 464 #endif
 465         return NULL;  // unknown node on this path
 466       }
 467     }
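
The loop above is per-path value recovery for scalar replacement: each
predecessor's memory chain is scanned for the store that defines this field,
and the recovered values become the inputs of a value Phi. A hedged sketch of
the shape of the algorithm, with hypothetical helper names:

    // Illustrative pseudo-C++ (make_phi/find_store_value are hypothetical):
    Node* value_for_field(PhiNode* mem_phi, int offset) {
      PhiNode* value_phi = make_phi(mem_phi->region());
      for (uint j = 1; j < mem_phi->req(); j++) {
        Node* v = find_store_value(mem_phi->in(j), offset);
        if (v == NULL) return NULL;  // one unknown path defeats the recovery
        value_phi->init_req(j, v);
      }
      return value_phi;
    }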


 529           unique_input = n;
 530         } else if (unique_input != n) {
 531           unique_input = top;
 532           break;
 533         }
 534       }
 535       if (unique_input != NULL && unique_input != top) {
 536         mem = unique_input;
 537       } else {
 538         done = true;
 539       }
 540     } else {
 541       assert(false, "unexpected node");
 542     }
 543   }
 544   if (mem != NULL) {
 545     if (mem == start_mem || mem == alloc_mem) {
 546       // hit a sentinel, return appropriate 0 value
 547       return _igvn.zerocon(ft);
 548     } else if (mem->is_Store()) {
 549       return mem->in(MemNode::ValueIn);
 550     } else if (mem->is_Phi()) {
 551       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 552       Node_Stack value_phis(a, 8);
 553       Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 554       if (phi != NULL) {
 555         return phi;
 556       } else {
 557         // Kill all new Phis
 558         while (value_phis.is_nonempty()) {
 559           Node* n = value_phis.node();
 560           _igvn.replace_node(n, C->top());
 561           value_phis.pop();
 562         }
 563       }
 564     }
 565   }
 566   // Something went wrong.
 567   return NULL;
 568 }
 569 


 596     }
 597   }
 598 
 599   if (can_eliminate && res != NULL) {
 600     for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
 601                                j < jmax && can_eliminate; j++) {
 602       Node* use = res->fast_out(j);
 603 
 604       if (use->is_AddP()) {
 605         const TypePtr* addp_type = _igvn.type(use)->is_ptr();
 606         int offset = addp_type->offset();
 607 
 608         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
 609           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 610           can_eliminate = false;
 611           break;
 612         }
 613         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 614                                    k < kmax && can_eliminate; k++) {
 615           Node* n = use->fast_out(k);
 616           if (!n->is_Store() && n->Opcode() != Op_CastP2X) {
 617             DEBUG_ONLY(disq_node = n;)
 618             if (n->is_Load() || n->is_LoadStore()) {
 619               NOT_PRODUCT(fail_eliminate = "Field load";)
 620             } else {
 621               NOT_PRODUCT(fail_eliminate = "Not store field reference";)
 622             }
 623             can_eliminate = false;
 624           }
 625         }
 626       } else if (use->is_SafePoint()) {
 627         SafePointNode* sfpt = use->as_SafePoint();
 628         if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
 629           // Object is passed as argument.
 630           DEBUG_ONLY(disq_node = use;)
 631           NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
 632           can_eliminate = false;
 633         }
 634         Node* sfptMem = sfpt->memory();
 635         if (sfptMem == NULL || sfptMem->is_top()) {
 636           DEBUG_ONLY(disq_node = use;)


 867       uint oc1 = res->outcnt();
 868 
 869       if (use->is_AddP()) {
 870         for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
 871           Node *n = use->last_out(k);
 872           uint oc2 = use->outcnt();
 873           if (n->is_Store()) {
 874 #ifdef ASSERT
 875             // Verify that there are no dependent MemBarVolatile nodes;
 876             // they should be removed during IGVN, see MemBarNode::Ideal().
 877             for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
 878                                        p < pmax; p++) {
 879               Node* mb = n->fast_out(p);
 880               assert(mb->is_Initialize() || !mb->is_MemBar() ||
 881                      mb->req() <= MemBarNode::Precedent ||
 882                      mb->in(MemBarNode::Precedent) != n,
 883                      "MemBarVolatile should be eliminated for non-escaping object");
 884             }
 885 #endif
 886             _igvn.replace_node(n, n->in(MemNode::Memory));
 887           } else {
 888             eliminate_card_mark(n);
 889           }
 890           k -= (oc2 - use->outcnt());
 891         }
 892       } else {
 893         eliminate_card_mark(use);
 894       }
 895       j -= (oc1 - res->outcnt());
 896     }
 897     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
 898     _igvn.remove_dead_node(res);
 899   }
 900 
 901   //
 902   // Process other users of allocation's projections
 903   //
 904   if (_resproj != NULL && _resproj->outcnt() != 0) {
 905     // First disconnect stores captured by the Initialize node.
 906     // If the Initialize node is eliminated first in the following code,
 907     // it will kill such stores and DUIterator_Last will assert.
 908     for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
 909       Node *use = _resproj->fast_out(j);
 910       if (use->is_AddP()) {
 911         // raw memory addresses used only by the initialization


1267       contended_phi_rawmem = mem;
1268     } else {
1269       contended_region = new (C) RegionNode(3);
1270       contended_phi_rawmem = new (C) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1271       // Now handle the passing-too-big test.  We fall into the contended
1272       // loop-back merge point.
1273       contended_region    ->init_req(fall_in_path, toobig_false);
1274       contended_phi_rawmem->init_req(fall_in_path, mem);
1275       transform_later(contended_region);
1276       transform_later(contended_phi_rawmem);
1277     }
1278 
1279     // Load(-locked) the heap top.
1280     // See note above concerning the control input when using a TLAB
1281     Node *old_eden_top = UseTLAB
1282       ? new (C) LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
1283       : new (C) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
1284 
1285     transform_later(old_eden_top);
1286     // Add to heap top to get a new heap top
1287     Node *new_eden_top = new (C) AddPNode(top(), old_eden_top, size_in_bytes);
1288     transform_later(new_eden_top);
1289     // Check for needing a GC; compare against heap end
1290     Node *needgc_cmp = new (C) CmpPNode(new_eden_top, eden_end);
1291     transform_later(needgc_cmp);
1292     Node *needgc_bol = new (C) BoolNode(needgc_cmp, BoolTest::ge);
1293     transform_later(needgc_bol);
1294     IfNode *needgc_iff = new (C) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
1295     transform_later(needgc_iff);
1296 
1297     // Plug the failing-heap-space-need-gc test into the slow-path region
1298     Node *needgc_true = new (C) IfTrueNode(needgc_iff);
1299     transform_later(needgc_true);
1300     if (initial_slow_test) {
1301       slow_region->init_req(need_gc_path, needgc_true);
1302       // This completes all paths into the slow merge point
1303       transform_later(slow_region);
1304     } else {                      // No initial slow path needed!
1305       // Just fall from the need-GC path straight into the VM call.
1306       slow_region = needgc_true;


1357 
1358       // Bump total allocated bytes for this thread
1359       Node* thread = new (C) ThreadLocalNode();
1360       transform_later(thread);
1361       Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread,
1362                                              in_bytes(JavaThread::allocated_bytes_offset()));
1363       Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1364                                     0, TypeLong::LONG, T_LONG);
1365 #ifdef _LP64
1366       Node* alloc_size = size_in_bytes;
1367 #else
1368       Node* alloc_size = new (C) ConvI2LNode(size_in_bytes);
1369       transform_later(alloc_size);
1370 #endif
1371       Node* new_alloc_bytes = new (C) AddLNode(alloc_bytes, alloc_size);
1372       transform_later(new_alloc_bytes);
1373       fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1374                                    0, new_alloc_bytes, T_LONG);
1375     }
1376 
1377     InitializeNode* init = alloc->initialization();
1378     fast_oop_rawmem = initialize_object(alloc,
1379                                         fast_oop_ctrl, fast_oop_rawmem, fast_oop,
1380                                         klass_node, length, size_in_bytes);
1381 
1382     // If initialization is performed by an array copy, any required
1383     // MemBarStoreStore was already added. If the object does not
1384     // escape no need for a MemBarStoreStore. Otherwise we need a
1385     // MemBarStoreStore so that stores that initialize this object
1386     // can't be reordered with a subsequent store that makes this
1387     // object accessible by other threads.
1388     if ( AARCH64_ONLY ( !alloc->does_not_escape_thread() &&
1389                         (init == NULL ||
1390                          !init->is_complete_with_arraycopy()) )
1391          NOT_AARCH64  ( init == NULL ||
1392                         (!init->is_complete_with_arraycopy() &&
1393                          !init->does_not_escape()) )
1394        ) {
1395       if (init == NULL || init->req() < InitializeNode::RawStores) {
1396         // No InitializeNode or no stores captured by zeroing
1397         // elimination. Simply add the MemBarStoreStore after object
1398         // initialization.
1399         MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
1400         transform_later(mb);
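
The barrier exists for safe publication: without a StoreStore fence between the
initializing stores and the store that publishes the reference, another thread
could observe the object with unwritten fields. A hedged portable-C++ analog,
in which release ordering plays the role of the StoreStore barrier:

    #include <atomic>
    struct Obj { int field; };
    std::atomic<Obj*> shared{nullptr};

    void publish() {
      Obj* o = new Obj;
      o->field = 42;                               // initializing store
      shared.store(o, std::memory_order_release);  // cannot float above the init
    }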


1638   Node* mark_node = NULL;
1639   // For now only enable fast locking for non-array types
1640   if (UseBiasedLocking && (length == NULL)) {
1641     mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
1642   } else {
1643     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
1644   }
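
Context for the branch above: with biased locking enabled (and a non-array
type), the initial mark word is loaded from the klass's prototype header, which
may already carry the bias pattern; otherwise every new object gets the static
prototype mark (unlocked, no hash). The stores that follow lay out the header:

    // Illustrative header layout written by the stores below (offsets symbolic):
    //   object + mark_offset_in_bytes()   <- mark_node   (prototype mark word)
    //   object + klass_offset_in_bytes()  <- klass_node  (class metadata)
    //   object + length_offset  (arrays)  <- length      (element count, T_INT)
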
1645   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
1646 
1647   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1648   int header_size = alloc->minimum_header_size();  // conservatively small
1649 
1650   // Array length
1651   if (length != NULL) {         // Arrays need length field
1652     rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1653     // conservatively small header size:
1654     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1655     ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
1656     if (k->is_array_klass())    // we know the exact header size in most cases:
1657       header_size = Klass::layout_helper_header_size(k->layout_helper());
1658   }
1659 
1660   // Clear the object body, if necessary.
1661   if (init == NULL) {
1662     // The init has somehow disappeared; be cautious and clear everything.
1663     //
1664     // This can happen if a node is allocated but an uncommon trap occurs
1665     // immediately.  In this case, the Initialize gets associated with the
1666     // trap, and may be placed in a different (outer) loop, if the Allocate
1667     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1668     // there can be two Allocates to one Initialize.  The answer in all these
1669     // edge cases is safety first.  It is always safe to clear immediately
1670     // within an Allocate, and then (maybe or maybe not) clear some more later.
1671     if (!ZeroTLAB)
1672       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1673                                             header_size, size_in_bytes,
1674                                             &_igvn);
1675   } else {
1676     if (!init->is_complete()) {
1677       // Try to win by zeroing only what the init does not store.




   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc_implementation/shenandoah/shenandoahBrooksPointer.hpp"
  28 #include "libadt/vectset.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/compile.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/locknode.hpp"
  35 #include "opto/loopnode.hpp"
  36 #include "opto/macro.hpp"
  37 #include "opto/memnode.hpp"
  38 #include "opto/node.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/shenandoahSupport.hpp"
  43 #include "opto/subnode.hpp"
  44 #include "opto/type.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 
  47 
  48 //
  49 // Replace any references to "oldref" in inputs to "use" with "newref".
  50 // Returns the number of replacements made.
  51 //
  52 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  53   int nreplacements = 0;
  54   uint req = use->req();
  55   for (uint j = 0; j < use->len(); j++) {
  56     Node *uin = use->in(j);
  57     if (uin == oldref) {
  58       if (j < req)
  59         use->set_req(j, newref);
  60       else
  61         use->set_prec(j, newref);
  62       nreplacements++;


 429   for (uint j = 1; j < length; j++) {
 430     Node *in = mem->in(j);
 431     if (in == NULL || in->is_top()) {
 432       values.at_put(j, in);
 433     } else  {
 434       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 435       if (val == start_mem || val == alloc_mem) {
 436         // hit a sentinel, return appropriate 0 value
 437         values.at_put(j, _igvn.zerocon(ft));
 438         continue;
 439       }
 440       if (val->is_Initialize()) {
 441         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 442       }
 443       if (val == NULL) {
 444         return NULL;  // can't find a value on this path
 445       }
 446       if (val == mem) {
 447         values.at_put(j, mem);
 448       } else if (val->is_Store()) {
 449         values.at_put(j, ShenandoahBarrierNode::skip_through_barrier(val->in(MemNode::ValueIn)));
 450       } else if (val->is_Proj() && val->in(0) == alloc) {
 451         values.at_put(j, _igvn.zerocon(ft));
 452       } else if (val->is_Phi()) {
 453         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 454         if (val == NULL) {
 455           return NULL;
 456         }
 457         values.at_put(j, val);
 458       } else if (val->Opcode() == Op_SCMemProj) {
 459         assert(val->in(0)->is_LoadStore() || val->in(0)->Opcode() == Op_EncodeISOArray, "sanity");
 460         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 461         return NULL;
 462       } else {
 463 #ifdef ASSERT
 464         val->dump();
 465         assert(false, "unknown node on this path");
 466 #endif
 467         return NULL;  // unknown node on this path
 468       }
 469     }
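
New in this version: the value recovered from a store may be wrapped in a
Shenandoah barrier node, and skip_through_barrier() unwraps it so the
scalar-replaced field sees the underlying value. A hedged sketch of the unwrap
(the predicate and accessor below are illustrative, not the exact API):

    // Illustrative only: strip a possibly-absent barrier wrapper.
    Node* skip_barrier(Node* v) {
      return is_shenandoah_barrier(v) ? barrier_input(v) : v;
    }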


 531           unique_input = n;
 532         } else if (unique_input != n) {
 533           unique_input = top;
 534           break;
 535         }
 536       }
 537       if (unique_input != NULL && unique_input != top) {
 538         mem = unique_input;
 539       } else {
 540         done = true;
 541       }
 542     } else {
 543       assert(false, "unexpected node");
 544     }
 545   }
 546   if (mem != NULL) {
 547     if (mem == start_mem || mem == alloc_mem) {
 548       // hit a sentinel, return appropriate 0 value
 549       return _igvn.zerocon(ft);
 550     } else if (mem->is_Store()) {
 551       return ShenandoahBarrierNode::skip_through_barrier(mem->in(MemNode::ValueIn));
 552     } else if (mem->is_Phi()) {
 553       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 554       Node_Stack value_phis(a, 8);
 555       Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 556       if (phi != NULL) {
 557         return phi;
 558       } else {
 559         // Kill all new Phis
 560         while (value_phis.is_nonempty()) {
 561           Node* n = value_phis.node();
 562           _igvn.replace_node(n, C->top());
 563           value_phis.pop();
 564         }
 565       }
 566     }
 567   }
 568   // Something went wrong.
 569   return NULL;
 570 }
 571 


 598     }
 599   }
 600 
 601   if (can_eliminate && res != NULL) {
 602     for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
 603                                j < jmax && can_eliminate; j++) {
 604       Node* use = res->fast_out(j);
 605 
 606       if (use->is_AddP()) {
 607         const TypePtr* addp_type = _igvn.type(use)->is_ptr();
 608         int offset = addp_type->offset();
 609 
 610         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
 611           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 612           can_eliminate = false;
 613           break;
 614         }
 615         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 616                                    k < kmax && can_eliminate; k++) {
 617           Node* n = use->fast_out(k);
 618           if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
 619               (!UseShenandoahGC || !n->is_g1_wb_pre_call())) {
 620             DEBUG_ONLY(disq_node = n;)
 621             if (n->is_Load() || n->is_LoadStore()) {
 622               NOT_PRODUCT(fail_eliminate = "Field load";)
 623             } else {
 624               NOT_PRODUCT(fail_eliminate = "Not store field reference";)
 625             }
 626             can_eliminate = false;
 627           }
 628         }
 629       } else if (use->is_SafePoint()) {
 630         SafePointNode* sfpt = use->as_SafePoint();
 631         if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
 632           // Object is passed as argument.
 633           DEBUG_ONLY(disq_node = use;)
 634           NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
 635           can_eliminate = false;
 636         }
 637         Node* sfptMem = sfpt->memory();
 638         if (sfptMem == NULL || sfptMem->is_top()) {
 639           DEBUG_ONLY(disq_node = use;)


 870       uint oc1 = res->outcnt();
 871 
 872       if (use->is_AddP()) {
 873         for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
 874           Node *n = use->last_out(k);
 875           uint oc2 = use->outcnt();
 876           if (n->is_Store()) {
 877 #ifdef ASSERT
 878             // Verify that there are no dependent MemBarVolatile nodes;
 879             // they should be removed during IGVN, see MemBarNode::Ideal().
 880             for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
 881                                        p < pmax; p++) {
 882               Node* mb = n->fast_out(p);
 883               assert(mb->is_Initialize() || !mb->is_MemBar() ||
 884                      mb->req() <= MemBarNode::Precedent ||
 885                      mb->in(MemBarNode::Precedent) != n,
 886                      "MemBarVolatile should be eliminated for non-escaping object");
 887             }
 888 #endif
 889             _igvn.replace_node(n, n->in(MemNode::Memory));
 890           } else if (UseShenandoahGC && n->is_g1_wb_pre_call()) {
 891             C->shenandoah_eliminate_g1_wb_pre(n, &_igvn);
 892           } else {
 893             eliminate_card_mark(n);
 894           }
 895           k -= (oc2 - use->outcnt());
 896         }
 897         _igvn.remove_dead_node(use);
 898       } else {
 899         eliminate_card_mark(use);
 900       }
 901       j -= (oc1 - res->outcnt());
 902     }
 903     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
 904     _igvn.remove_dead_node(res);
 905   }
 906 
 907   //
 908   // Process other users of allocation's projections
 909   //
 910   if (_resproj != NULL && _resproj->outcnt() != 0) {
 911     // First disconnect stores captured by the Initialize node.
 912     // If the Initialize node is eliminated first in the following code,
 913     // it will kill such stores and DUIterator_Last will assert.
 914     for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
 915       Node *use = _resproj->fast_out(j);
 916       if (use->is_AddP()) {
 917         // raw memory addresses used only by the initialization


1273       contended_phi_rawmem = mem;
1274     } else {
1275       contended_region = new (C) RegionNode(3);
1276       contended_phi_rawmem = new (C) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1277       // Now handle the passing-too-big test.  We fall into the contended
1278       // loop-back merge point.
1279       contended_region    ->init_req(fall_in_path, toobig_false);
1280       contended_phi_rawmem->init_req(fall_in_path, mem);
1281       transform_later(contended_region);
1282       transform_later(contended_phi_rawmem);
1283     }
1284 
1285     // Load(-locked) the heap top.
1286     // See note above concerning the control input when using a TLAB
1287     Node *old_eden_top = UseTLAB
1288       ? new (C) LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
1289       : new (C) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
1290 
1291     transform_later(old_eden_top);
1292     // Add to heap top to get a new heap top
1293 
1294     Node* init_size_in_bytes = size_in_bytes;
1295     if (UseShenandoahGC) {
1296       // Allocate extra space for the Shenandoah Brooks pointer.
1297       size_in_bytes = new (C) AddXNode(size_in_bytes, _igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
1298       transform_later(size_in_bytes);
1299     }
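
In this version Shenandoah keeps a forwarding pointer (the Brooks pointer) in
the word immediately before each object, so the allocation is grown here while
init_size_in_bytes preserves the object's own size for initialization. A
hedged picture of the resulting layout (field widths symbolic):

    //   cell returned by the bump allocation
    //   v
    //   [ Brooks forwarding word ][ mark ][ klass ][ fields / elements ... ]
    //                             ^ fast_oop, after the AddP bump further below
    //
    //   total allocation = init_size_in_bytes + ShenandoahBrooksPointer::byte_size()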
1300 
1301     Node *new_eden_top = new (C) AddPNode(top(), old_eden_top, size_in_bytes);
1302     transform_later(new_eden_top);
1303     // Check for needing a GC; compare against heap end
1304     Node *needgc_cmp = new (C) CmpPNode(new_eden_top, eden_end);
1305     transform_later(needgc_cmp);
1306     Node *needgc_bol = new (C) BoolNode(needgc_cmp, BoolTest::ge);
1307     transform_later(needgc_bol);
1308     IfNode *needgc_iff = new (C) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
1309     transform_later(needgc_iff);
1310 
1311     // Plug the failing-heap-space-need-gc test into the slow-path region
1312     Node *needgc_true = new (C) IfTrueNode(needgc_iff);
1313     transform_later(needgc_true);
1314     if (initial_slow_test) {
1315       slow_region->init_req(need_gc_path, needgc_true);
1316       // This completes all paths into the slow merge point
1317       transform_later(slow_region);
1318     } else {                      // No initial slow path needed!
1319       // Just fall from the need-GC path straight into the VM call.
1320       slow_region = needgc_true;


1371 
1372       // Bump total allocated bytes for this thread
1373       Node* thread = new (C) ThreadLocalNode();
1374       transform_later(thread);
1375       Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread,
1376                                              in_bytes(JavaThread::allocated_bytes_offset()));
1377       Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1378                                     0, TypeLong::LONG, T_LONG);
1379 #ifdef _LP64
1380       Node* alloc_size = size_in_bytes;
1381 #else
1382       Node* alloc_size = new (C) ConvI2LNode(size_in_bytes);
1383       transform_later(alloc_size);
1384 #endif
1385       Node* new_alloc_bytes = new (C) AddLNode(alloc_bytes, alloc_size);
1386       transform_later(new_alloc_bytes);
1387       fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
1388                                    0, new_alloc_bytes, T_LONG);
1389     }
1390 
1391     if (UseShenandoahGC) {
1392       // Bump the object address past the Shenandoah Brooks pointer word.
1393       fast_oop = new (C) AddPNode(top(), fast_oop, _igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
1394       transform_later(fast_oop);
1395     }
1396 
1397     InitializeNode* init = alloc->initialization();
1398     fast_oop_rawmem = initialize_object(alloc,
1399                                         fast_oop_ctrl, fast_oop_rawmem, fast_oop,
1400                                         klass_node, length, init_size_in_bytes);
1401 
1402     // If initialization is performed by an array copy, any required
1403     // MemBarStoreStore was already added. If the object does not
1404     // escape no need for a MemBarStoreStore. Otherwise we need a
1405     // MemBarStoreStore so that stores that initialize this object
1406     // can't be reordered with a subsequent store that makes this
1407     // object accessible by other threads.
1408     if ( AARCH64_ONLY ( !alloc->does_not_escape_thread() &&
1409                         (init == NULL ||
1410                          !init->is_complete_with_arraycopy()) )
1411          NOT_AARCH64  ( init == NULL ||
1412                         (!init->is_complete_with_arraycopy() &&
1413                          !init->does_not_escape()) )
1414        ) {
1415       if (init == NULL || init->req() < InitializeNode::RawStores) {
1416         // No InitializeNode or no stores captured by zeroing
1417         // elimination. Simply add the MemBarStoreStore after object
1418         // initialization.
1419         MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
1420         transform_later(mb);


1658   Node* mark_node = NULL;
1659   // For now only enable fast locking for non-array types
1660   if (UseBiasedLocking && (length == NULL)) {
1661     mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
1662   } else {
1663     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
1664   }
1665   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
1666 
1667   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1668   int header_size = alloc->minimum_header_size();  // conservatively small
1669 
1670   // Array length
1671   if (length != NULL) {         // Arrays need length field
1672     rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1673     // conservatively small header size:
1674     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1675     ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
1676     if (k->is_array_klass())    // we know the exact header size in most cases:
1677       header_size = Klass::layout_helper_header_size(k->layout_helper());
1678   }
1679 
1680   if (UseShenandoahGC) {
1681     // Initialize the Shenandoah Brooks pointer to point to the object itself.
1682     rawmem = make_store(control, rawmem, object, ShenandoahBrooksPointer::byte_offset(), object, T_OBJECT);
1683   }
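
The store above establishes the Shenandoah invariant that a fresh, not yet
evacuated object forwards to itself; the concurrent collector later redirects
this word to the evacuated copy. A hedged restatement:

    // After the store, for a newly allocated obj:
    //   *(oop*)((char*)obj + ShenandoahBrooksPointer::byte_offset()) == obj
    // so reads through the Brooks pointer see obj itself until evacuation.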
1684 
1685   // Clear the object body, if necessary.
1686   if (init == NULL) {
1687     // The init has somehow disappeared; be cautious and clear everything.
1688     //
1689     // This can happen if a node is allocated but an uncommon trap occurs
1690     // immediately.  In this case, the Initialize gets associated with the
1691     // trap, and may be placed in a different (outer) loop, if the Allocate
1692     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1693     // there can be two Allocates to one Initialize.  The answer in all these
1694     // edge cases is safety first.  It is always safe to clear immediately
1695     // within an Allocate, and then (maybe or maybe not) clear some more later.
1696     if (!ZeroTLAB)
1697       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1698                                             header_size, size_in_bytes,
1699                                             &_igvn);
1700   } else {
1701     if (!init->is_complete()) {
1702       // Try to win by zeroing only what the init does not store.

