src/share/vm/opto/graphKit.cpp

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  28 #include "gc_implementation/g1/heapRegion.hpp"
  29 #include "gc_interface/collectedHeap.hpp"

  30 #include "memory/barrierSet.hpp"
  31 #include "memory/cardTableModRefBS.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/graphKit.hpp"
  34 #include "opto/idealKit.hpp"
  35 #include "opto/locknode.hpp"
  36 #include "opto/machnode.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 





  43 //----------------------------GraphKit-----------------------------------------
  44 // Main utility constructor.
  45 GraphKit::GraphKit(JVMState* jvms)
  46   : Phase(Phase::Parser),
  47     _env(C->env()),
  48     _gvn(*C->initial_gvn())
  49 {
  50   _exceptions = jvms->map()->next_exception();
  51   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  52   set_jvms(jvms);
  53 }
  54 
  55 // Private constructor for parser.
  56 GraphKit::GraphKit()
  57   : Phase(Phase::Parser),
  58     _env(C->env()),
  59     _gvn(*C->initial_gvn())
  60 {
  61   _exceptions = NULL;
  62   set_map(NULL);


1518 }
1519 
1520 
1521 void GraphKit::pre_barrier(bool do_load,
1522                            Node* ctl,
1523                            Node* obj,
1524                            Node* adr,
1525                            uint  adr_idx,
1526                            Node* val,
1527                            const TypeOopPtr* val_type,
1528                            Node* pre_val,
1529                            BasicType bt) {
1530 
1531   BarrierSet* bs = Universe::heap()->barrier_set();
1532   set_control(ctl);
1533   switch (bs->kind()) {
1534     case BarrierSet::G1SATBCT:
1535     case BarrierSet::G1SATBCTLogging:
1536       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
1537       break;
1538 




1539     case BarrierSet::CardTableModRef:
1540     case BarrierSet::CardTableExtension:
1541     case BarrierSet::ModRef:
1542       break;
1543 
1544     case BarrierSet::Other:
1545     default      :
1546       ShouldNotReachHere();
1547 
1548   }
1549 }
1550 
1551 bool GraphKit::can_move_pre_barrier() const {
1552   BarrierSet* bs = Universe::heap()->barrier_set();
1553   switch (bs->kind()) {
1554     case BarrierSet::G1SATBCT:
1555     case BarrierSet::G1SATBCTLogging:

1556       return true; // Can move it if no safepoint
1557 
1558     case BarrierSet::CardTableModRef:
1559     case BarrierSet::CardTableExtension:
1560     case BarrierSet::ModRef:
1561       return true; // There is no pre-barrier
1562 
1563     case BarrierSet::Other:
1564     default      :
1565       ShouldNotReachHere();
1566   }
1567   return false;
1568 }
1569 
1570 void GraphKit::post_barrier(Node* ctl,
1571                             Node* store,
1572                             Node* obj,
1573                             Node* adr,
1574                             uint  adr_idx,
1575                             Node* val,
1576                             BasicType bt,
1577                             bool use_precise) {
1578   BarrierSet* bs = Universe::heap()->barrier_set();
1579   set_control(ctl);
1580   switch (bs->kind()) {
1581     case BarrierSet::G1SATBCT:
1582     case BarrierSet::G1SATBCTLogging:
1583       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1584       break;
1585 




1586     case BarrierSet::CardTableModRef:
1587     case BarrierSet::CardTableExtension:
1588       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1589       break;
1590 
1591     case BarrierSet::ModRef:
1592       break;
1593 
1594     case BarrierSet::Other:
1595     default      :
1596       ShouldNotReachHere();
1597 
1598   }
1599 }
1600 
1601 Node* GraphKit::store_oop(Node* ctl,
1602                           Node* obj,
1603                           Node* adr,
1604                           const TypePtr* adr_type,
1605                           Node* val,


1689   // a type assertion that its value is known to be a small positive
1690   // number.  (The prior range check has ensured this.)
1691   // This assertion is used by ConvI2LNode::Ideal.
1692   int index_max = max_jint - 1;  // array size is max_jint, index is one less
1693   if (sizetype != NULL) index_max = sizetype->_hi - 1;
1694   const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
1695   idx = C->constrained_convI2L(&_gvn, idx, iidxtype, ctrl);
1696 #endif
1697   Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1698   return basic_plus_adr(ary, base, scale);
1699 }
1700 
1701 //-------------------------load_array_element-------------------------
1702 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1703   const Type* elemtype = arytype->elem();
1704   BasicType elembt = elemtype->array_element_basic_type();
1705   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1706   if (elembt == T_NARROWOOP) {
1707     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1708   }

1709   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1710   return ld;
1711 }
1712 
1713 //-------------------------set_arguments_for_java_call-------------------------
1714 // Arguments (pre-popped from the stack) are taken from the JVMS.
1715 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1716   // Add the call arguments:
1717   uint nargs = call->method()->arg_size();
1718   for (uint i = 0; i < nargs; i++) {
1719     Node* arg = argument(i);
1720     call->init_req(i + TypeFunc::Parms, arg);
1721   }
1722 }
1723 
1724 //---------------------------set_edges_for_java_call---------------------------
1725 // Connect a newly created call into the current JVMS.
1726 // A return value node (if any) is returned from set_edges_for_java_call.
1727 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1728 


3648     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3649     if (ccast != length) {
3650       _gvn.set_type_bottom(ccast);
3651       record_for_igvn(ccast);
3652       replace_in_map(length, ccast);
3653     }
3654   }
3655 
3656   return javaoop;
3657 }
3658 
3659 // The following "Ideal_foo" functions are placed here because they recognize
3660 // the graph shapes created by the functions immediately above.
3661 
3662 //---------------------------Ideal_allocation----------------------------------
3663 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3664 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3665   if (ptr == NULL) {     // reduce dumb test in callers
3666     return NULL;
3667   }






3668   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3669     ptr = ptr->in(1);
3670     if (ptr == NULL) return NULL;
3671   }
3672   // Return NULL for allocations with several casts:
3673   //   j.l.reflect.Array.newInstance(jobject, jint)
3674   //   Object.clone()
3675   // to keep more precise type from last cast.
3676   if (ptr->is_Proj()) {
3677     Node* allo = ptr->in(0);
3678     if (allo != NULL && allo->is_Allocate()) {
3679       return allo->as_Allocate();
3680     }
3681   }
3682   // Report failure to match.
3683   return NULL;
3684 }
3685 
3686 // Fancy version which also strips off an offset (and reports it to caller).
3687 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,


3831 
3832   // Get the alias_index for raw card-mark memory
3833   int adr_type = Compile::AliasIdxRaw;
3834   Node*   zero = __ ConI(0); // Dirty card value
3835   BasicType bt = T_BYTE;
3836 
3837   if (UseCondCardMark) {
3838     // The classic GC reference write barrier is typically implemented
3839     // as a store into the global card mark table.  Unfortunately
3840     // unconditional stores can result in false sharing and excessive
3841     // coherence traffic as well as false transactional aborts.
3842     // UseCondCardMark enables MP "polite" conditional card mark
3843     // stores.  In theory we could relax the load from ctrl() to
3844     // no_ctrl, but that doesn't buy much latitude.
3845     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3846     __ if_then(card_val, BoolTest::ne, zero);
3847   }
3848 
3849   // Smash zero into card
3850   if( !UseConcMarkSweepGC ) {
3851     __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
3852   } else {
3853     // Specialized path for CM store barrier
3854     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3855   }
3856 
3857   if (UseCondCardMark) {
3858     __ end_if();
3859   }
3860 
3861   // Final sync IdealKit and GraphKit.
3862   final_sync(ideal);
3863 }
3864 









3865 // G1 pre/post barriers
3866 void GraphKit::g1_write_barrier_pre(bool do_load,
3867                                     Node* obj,
3868                                     Node* adr,
3869                                     uint alias_idx,
3870                                     Node* val,
3871                                     const TypeOopPtr* val_type,
3872                                     Node* pre_val,
3873                                     BasicType bt) {
3874 
3875   // Some sanity checks
3876   // Note: val is unused in this routine.
3877 
3878   if (do_load) {
3879     // We need to generate the load of the previous value
3880     assert(obj != NULL, "must have a base");
3881     assert(adr != NULL, "where are loading from?");
3882     assert(pre_val == NULL, "loaded already?");
3883     assert(val_type != NULL, "need a type");
3884   } else {


3902   float likely  = PROB_LIKELY(0.999);
3903   float unlikely  = PROB_UNLIKELY(0.999);
3904 
3905   BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3906   assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3907 
3908   // Offsets into the thread
3909   const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
3910                                           PtrQueue::byte_offset_of_active());
3911   const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
3912                                           PtrQueue::byte_offset_of_index());
3913   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
3914                                           PtrQueue::byte_offset_of_buf());
3915 
3916   // Now the actual pointers into the thread
3917   Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
3918   Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
3919   Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
3920 
3921   // Now some of the values
3922   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);









3923 
3924   // if (!marking)
3925   __ if_then(marking, BoolTest::ne, zero, unlikely); {
3926     BasicType index_bt = TypeX_X->basic_type();
3927     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
3928     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
3929 
3930     if (do_load) {
3931       // load original value
3932       // alias_idx correct??
3933       pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
3934     }
3935 
3936     // if (pre_val != NULL)
3937     __ if_then(pre_val, BoolTest::ne, null()); {
3938       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3939 
3940       // is the queue for this thread full?
3941       __ if_then(index, BoolTest::ne, zeroX, likely); {
3942 
3943         // decrement the index
3944         Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3945 
3946         // Now get the buffer location we will log the previous value into and store it
3947         Node *log_addr = __ AddP(no_base, buffer, next_index);
3948         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
3949         // update the index
3950         __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
3951 
3952       } __ else_(); {
3953 
3954         // logging buffer is full, call the runtime
3955         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3956         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
3957       } __ end_if();  // (!index)
3958     } __ end_if();  // (pre_val != NULL)
3959   } __ end_if();  // (!marking)
3960 
3961   // Final sync IdealKit and GraphKit.
3962   final_sync(ideal);

3963 }
3964 
3965 //
3966 // Update the card table and add card address to the queue
3967 //
3968 void GraphKit::g1_mark_card(IdealKit& ideal,
3969                             Node* card_adr,
3970                             Node* oop_store,
3971                             uint oop_alias_idx,
3972                             Node* index,
3973                             Node* index_adr,
3974                             Node* buffer,
3975                             const TypeFunc* tf) {
3976 
3977   Node* zero  = __ ConI(0);
3978   Node* zeroX = __ ConX(0);
3979   Node* no_base = __ top();
3980   BasicType card_bt = T_BYTE;
3981   // Smash zero into card. MUST BE ORDERED WRT TO STORE
3982   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);


4134     int count_field_idx = C->get_alias_index(count_field_type);
4135     return make_load(ctrl,
4136                      basic_plus_adr(str, str, count_offset),
4137                      TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
4138   } else {
4139     return load_array_length(load_String_value(ctrl, str));
4140   }
4141 }
4142 
4143 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
4144   int value_offset = java_lang_String::value_offset_in_bytes();
4145   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4146                                                      false, NULL, 0);
4147   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4148   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
4149                                                    TypeAry::make(TypeInt::CHAR,TypeInt::POS),
4150                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
4151   int value_field_idx = C->get_alias_index(value_field_type);
4152   Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
4153                          value_type, T_OBJECT, value_field_idx, MemNode::unordered);





4154   // String.value field is known to be @Stable.
4155   if (UseImplicitStableValues) {
4156     load = cast_array_to_stable(load, value_type);
4157   }
4158   return load;
4159 }
4160 
4161 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
4162   int offset_offset = java_lang_String::offset_offset_in_bytes();
4163   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4164                                                      false, NULL, 0);
4165   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4166   int offset_field_idx = C->get_alias_index(offset_field_type);
4167   store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
4168                   value, T_INT, offset_field_idx, MemNode::unordered);
4169 }
4170 
4171 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4172   int value_offset = java_lang_String::value_offset_in_bytes();
4173   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),




  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  28 #include "gc_implementation/g1/heapRegion.hpp"
  29 #include "gc_interface/collectedHeap.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  31 #include "memory/barrierSet.hpp"
  32 #include "memory/cardTableModRefBS.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/graphKit.hpp"
  35 #include "opto/idealKit.hpp"
  36 #include "opto/locknode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "runtime/deoptimization.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 
  44 #if INCLUDE_ALL_GCS
  45 #include "gc_implementation/shenandoah/shenandoahBarrierSetC2.hpp"
  46 #include "gc_implementation/shenandoah/shenandoahSupport.hpp"
  47 #endif
  48 
  49 //----------------------------GraphKit-----------------------------------------
  50 // Main utility constructor.
  51 GraphKit::GraphKit(JVMState* jvms)
  52   : Phase(Phase::Parser),
  53     _env(C->env()),
  54     _gvn(*C->initial_gvn())
  55 {
  56   _exceptions = jvms->map()->next_exception();
  57   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  58   set_jvms(jvms);
  59 }
  60 
  61 // Private constructor for parser.
  62 GraphKit::GraphKit()
  63   : Phase(Phase::Parser),
  64     _env(C->env()),
  65     _gvn(*C->initial_gvn())
  66 {
  67   _exceptions = NULL;
  68   set_map(NULL);


1524 }
1525 
1526 
1527 void GraphKit::pre_barrier(bool do_load,
1528                            Node* ctl,
1529                            Node* obj,
1530                            Node* adr,
1531                            uint  adr_idx,
1532                            Node* val,
1533                            const TypeOopPtr* val_type,
1534                            Node* pre_val,
1535                            BasicType bt) {
1536 
1537   BarrierSet* bs = Universe::heap()->barrier_set();
1538   set_control(ctl);
1539   switch (bs->kind()) {
1540     case BarrierSet::G1SATBCT:
1541     case BarrierSet::G1SATBCTLogging:
1542       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
1543       break;
1544     case BarrierSet::ShenandoahBarrierSet:
1545       if (ShenandoahSATBBarrier) {
1546         g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
1547       }
1548       break;
1549     case BarrierSet::CardTableModRef:
1550     case BarrierSet::CardTableExtension:
1551     case BarrierSet::ModRef:
1552       break;
1553 
1554     case BarrierSet::Other:
1555     default      :
1556       ShouldNotReachHere();
1557 
1558   }
1559 }
1560 
1561 bool GraphKit::can_move_pre_barrier() const {
1562   BarrierSet* bs = Universe::heap()->barrier_set();
1563   switch (bs->kind()) {
1564     case BarrierSet::G1SATBCT:
1565     case BarrierSet::G1SATBCTLogging:
1566     case BarrierSet::ShenandoahBarrierSet:
1567       return true; // Can move it if no safepoint
1568 
1569     case BarrierSet::CardTableModRef:
1570     case BarrierSet::CardTableExtension:
1571     case BarrierSet::ModRef:
1572       return true; // There is no pre-barrier
1573 
1574     case BarrierSet::Other:
1575     default      :
1576       ShouldNotReachHere();
1577   }
1578   return false;
1579 }
1580 
1581 void GraphKit::post_barrier(Node* ctl,
1582                             Node* store,
1583                             Node* obj,
1584                             Node* adr,
1585                             uint  adr_idx,
1586                             Node* val,
1587                             BasicType bt,
1588                             bool use_precise) {
1589   BarrierSet* bs = Universe::heap()->barrier_set();
1590   set_control(ctl);
1591   switch (bs->kind()) {
1592     case BarrierSet::G1SATBCT:
1593     case BarrierSet::G1SATBCTLogging:
1594       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1595       break;
1596     case BarrierSet::ShenandoahBarrierSet:
1597       if (ShenandoahStoreValEnqueueBarrier) {
1598         g1_write_barrier_pre(false, NULL, NULL, max_juint, NULL, NULL, val, bt);
1599       }
1600       break;
1601     case BarrierSet::CardTableModRef:
1602     case BarrierSet::CardTableExtension:
1603       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1604       break;
1605 
1606     case BarrierSet::ModRef:
1607       break;
1608 
1609     case BarrierSet::Other:
1610     default      :
1611       ShouldNotReachHere();
1612 
1613   }
1614 }
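
For orientation, a stand-alone sketch of what the pre_barrier/post_barrier dispatch above amounts to for a single reference store, assuming the ShenandoahSATBBarrier and ShenandoahStoreValEnqueueBarrier flags are on. Everything here (helper names, the flat card table, the 512-byte card size) is an illustrative stand-in rather than HotSpot API, and G1's remembered-set post barrier is reduced to a plain card dirtying:

    #include <cstddef>
    #include <cstdint>

    typedef void* oop;

    // Toy stand-ins for the per-collector machinery.
    static bool    marking_active = false;
    static uint8_t card_table[1 << 16];                    // 0 == dirty, as above
    static void satb_enqueue(oop /*v*/) { /* push onto the thread's SATB queue */ }

    enum GC { SERIAL_OR_PARALLEL, CMS, G1, SHENANDOAH };

    void store_reference(GC gc, oop* field, oop new_val) {
      // pre_barrier(): only the SATB collectors log the previous value.
      if (gc == G1 || gc == SHENANDOAH) {
        oop prev = *field;                                 // the do_load == true case
        if (marking_active && prev != NULL) satb_enqueue(prev);
      }

      *field = new_val;                                    // the store itself

      // post_barrier(): card-table collectors dirty the card; Shenandoah
      // instead enqueues the NEW value (pre_val == val, do_load == false).
      switch (gc) {
        case SERIAL_OR_PARALLEL:
        case CMS:
        case G1:   // simplified: real G1 also filters NULL and same-region stores
          card_table[((uintptr_t)field >> 9) & 0xFFFF] = 0;
          break;
        case SHENANDOAH:
          if (marking_active && new_val != NULL) satb_enqueue(new_val);
          break;
      }
    }
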
1615 
1616 Node* GraphKit::store_oop(Node* ctl,
1617                           Node* obj,
1618                           Node* adr,
1619                           const TypePtr* adr_type,
1620                           Node* val,


1704   // a type assertion that its value is known to be a small positive
1705   // number.  (The prior range check has ensured this.)
1706   // This assertion is used by ConvI2LNode::Ideal.
1707   int index_max = max_jint - 1;  // array size is max_jint, index is one less
1708   if (sizetype != NULL) index_max = sizetype->_hi - 1;
1709   const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
1710   idx = C->constrained_convI2L(&_gvn, idx, iidxtype, ctrl);
1711 #endif
1712   Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1713   return basic_plus_adr(ary, base, scale);
1714 }
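
A plain C++ model of the addressing math above (range-constrained ConvI2L, LShiftX, AddP). The header size and element shift are passed in here instead of being derived from the array type, and none of the names are HotSpot's:

    #include <cstdint>

    // idx has already passed the range check, so it is known to lie in
    // [0, length-1]; attaching that type to the int->long widening is what
    // lets ConvI2LNode::Ideal hoist or fold the conversion.
    inline char* element_address(char* ary, int64_t header_bytes,
                                 int32_t idx, int log2_elem_size) {
      int64_t widened = (int64_t)idx;                // constrained_convI2L
      int64_t scale   = widened << log2_elem_size;   // LShiftXNode(idx, shift)
      return ary + header_bytes + scale;             // basic_plus_adr(ary, base, scale)
    }
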
1715 
1716 //-------------------------load_array_element-------------------------
1717 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1718   const Type* elemtype = arytype->elem();
1719   BasicType elembt = elemtype->array_element_basic_type();
1720   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1721   if (elembt == T_NARROWOOP) {
1722     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1723   }
1724   assert(elembt != T_OBJECT && elembt != T_ARRAY, "sanity");
1725   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1726   return ld;
1727 }
1728 
1729 //-------------------------set_arguments_for_java_call-------------------------
1730 // Arguments (pre-popped from the stack) are taken from the JVMS.
1731 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1732   // Add the call arguments:
1733   uint nargs = call->method()->arg_size();
1734   for (uint i = 0; i < nargs; i++) {
1735     Node* arg = argument(i);
1736     call->init_req(i + TypeFunc::Parms, arg);
1737   }
1738 }
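
The loop above starts at TypeFunc::Parms because the first inputs of a call node are fixed bookkeeping edges (standard C2 layout; the slot names below are the real TypeFunc constants, the descriptions are paraphrased):

    // in(TypeFunc::Control)    control
    // in(TypeFunc::I_O)        i/o state
    // in(TypeFunc::Memory)     memory state
    // in(TypeFunc::FramePtr)   frame pointer
    // in(TypeFunc::ReturnAdr)  return address
    // in(TypeFunc::Parms + i)  i-th Java argument -- filled by the loop above
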
1739 
1740 //---------------------------set_edges_for_java_call---------------------------
1741 // Connect a newly created call into the current JVMS.
1742 // A return value node (if any) is returned from set_edges_for_java_call.
1743 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1744 


3664     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3665     if (ccast != length) {
3666       _gvn.set_type_bottom(ccast);
3667       record_for_igvn(ccast);
3668       replace_in_map(length, ccast);
3669     }
3670   }
3671 
3672   return javaoop;
3673 }
3674 
3675 // The following "Ideal_foo" functions are placed here because they recognize
3676 // the graph shapes created by the functions immediately above.
3677 
3678 //---------------------------Ideal_allocation----------------------------------
3679 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3680 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3681   if (ptr == NULL) {     // reduce dumb test in callers
3682     return NULL;
3683   }
3684 
3685 #if INCLUDE_ALL_GCS
3686   if (UseShenandoahGC) {
3687     ptr = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(ptr);
3688   }
3689 #endif
3690   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3691     ptr = ptr->in(1);
3692     if (ptr == NULL) return NULL;
3693   }
3694   // Return NULL for allocations with several casts:
3695   //   j.l.reflect.Array.newInstance(jobject, jint)
3696   //   Object.clone()
3697   // to keep more precise type from last cast.
3698   if (ptr->is_Proj()) {
3699     Node* allo = ptr->in(0);
3700     if (allo != NULL && allo->is_Allocate()) {
3701       return allo->as_Allocate();
3702     }
3703   }
3704   // Report failure to match.
3705   return NULL;
3706 }
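
The shape being matched, read from the incoming ptr back to the allocation; under UseShenandoahGC a GC barrier node may sit between the use and the cast and is stepped over first. A sketch, not an exhaustive list of node types:

    //   ptr
    //    |   (optional, UseShenandoahGC: stripped by step_over_gc_barrier)
    //   [Shenandoah barrier node]
    //    |
    //   CheckCastPP     raw -> oop cast; only one is stripped
    //    |
    //   Proj            result projection of the allocation
    //    |
    //   AllocateNode    returned via as_Allocate()
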
3707 
3708 // Fancy version which also strips off an offset (and reports it to caller).
3709 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,


3853 
3854   // Get the alias_index for raw card-mark memory
3855   int adr_type = Compile::AliasIdxRaw;
3856   Node*   zero = __ ConI(0); // Dirty card value
3857   BasicType bt = T_BYTE;
3858 
3859   if (UseCondCardMark) {
3860     // The classic GC reference write barrier is typically implemented
3861     // as a store into the global card mark table.  Unfortunately
3862     // unconditional stores can result in false sharing and excessive
3863     // coherence traffic as well as false transactional aborts.
3864     // UseCondCardMark enables MP "polite" conditional card mark
3865     // stores.  In theory we could relax the load from ctrl() to
3866     // no_ctrl, but that doesn't buy much latitude.
3867     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3868     __ if_then(card_val, BoolTest::ne, zero);
3869   }
3870 
3871   // Smash zero into card
3872   if( !UseConcMarkSweepGC ) {
3873     __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
3874   } else {
3875     // Specialized path for CM store barrier
3876     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3877   }
3878 
3879   if (UseCondCardMark) {
3880     __ end_if();
3881   }
3882 
3883   // Final sync IdealKit and GraphKit.
3884   final_sync(ideal);
3885 }
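
The UseCondCardMark arm above reads the card first so that already-dirty cards are not stored to again, which is what avoids the false sharing and transactional aborts mentioned in the comment. A stand-alone sketch under assumed constants (512-byte cards, toy table size; not HotSpot API). Note the real CMS path uses storeCM instead, to keep the card mark ordered with the oop store:

    #include <cstdint>

    static const int     kCardShift = 9;      // assumed 2^9 = 512-byte cards
    static const uint8_t kDirty     = 0;      // matches "zero = Dirty card value"
    static uint8_t       cards[1 << 16];      // toy card table

    inline void cond_card_mark(const void* field_addr) {
      uint8_t* card = &cards[((uintptr_t)field_addr >> kCardShift) & 0xFFFF];
      if (*card != kDirty) {                  // UseCondCardMark: load and compare
        *card = kDirty;                       // "smash zero into card"
      }
    }
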
3886 
3887 static void g1_write_barrier_pre_helper(const GraphKit& kit, Node* adr) {
3888   if (UseShenandoahGC && adr != NULL) {
3889     Node* c = kit.control();
3890     Node* call = c->in(1)->in(1)->in(1)->in(0);
3891     assert(call->is_g1_wb_pre_call(), "g1_wb_pre call expected");
3892     call->add_req(adr);
3893   }
3894 }
3895 
3896 // G1 pre/post barriers
3897 void GraphKit::g1_write_barrier_pre(bool do_load,
3898                                     Node* obj,
3899                                     Node* adr,
3900                                     uint alias_idx,
3901                                     Node* val,
3902                                     const TypeOopPtr* val_type,
3903                                     Node* pre_val,
3904                                     BasicType bt) {
3905 
3906   // Some sanity checks
3907   // Note: val is unused in this routine.
3908 
3909   if (do_load) {
3910     // We need to generate the load of the previous value
3911     assert(obj != NULL, "must have a base");
3912     assert(adr != NULL, "where are loading from?");
3913     assert(pre_val == NULL, "loaded already?");
3914     assert(val_type != NULL, "need a type");
3915   } else {


3933   float likely  = PROB_LIKELY(0.999);
3934   float unlikely  = PROB_UNLIKELY(0.999);
3935 
3936   BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3937   assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3938 
3939   // Offsets into the thread
3940   const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
3941                                           PtrQueue::byte_offset_of_active());
3942   const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
3943                                           PtrQueue::byte_offset_of_index());
3944   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
3945                                           PtrQueue::byte_offset_of_buf());
3946 
3947   // Now the actual pointers into the thread
3948   Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
3949   Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
3950   Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
3951 
3952   // Now some of the values
3953   Node* marking;
3954   if (UseShenandoahGC) {
3955     Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(JavaThread::gc_state_offset())));
3956     Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
3957     marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL));
3958     assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");
3959   } else {
3960     assert(UseG1GC, "should be");
3961     marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
3962   }
3963 
3964   // if (!marking)
3965   __ if_then(marking, BoolTest::ne, zero, unlikely); {
3966     BasicType index_bt = TypeX_X->basic_type();
3967     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
3968     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
3969 
3970     if (do_load) {
3971       // load original value
3972       // alias_idx correct??
3973       pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
3974     }
3975 
3976     // if (pre_val != NULL)
3977     __ if_then(pre_val, BoolTest::ne, null()); {
3978       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3979 
3980       // is the queue for this thread full?
3981       __ if_then(index, BoolTest::ne, zeroX, likely); {
3982 
3983         // decrement the index
3984         Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3985 
3986         // Now get the buffer location we will log the previous value into and store it
3987         Node *log_addr = __ AddP(no_base, buffer, next_index);
3988         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
3989         // update the index
3990         __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
3991 
3992       } __ else_(); {
3993 
3994         // logging buffer is full, call the runtime
3995         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3996         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
3997       } __ end_if();  // (!index)
3998     } __ end_if();  // (pre_val != NULL)
3999   } __ end_if();  // (!marking)
4000 
4001   // Final sync IdealKit and GraphKit.
4002   final_sync(ideal);
4003   g1_write_barrier_pre_helper(*this, adr);
4004 }
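
Read as straight-line code, the graph built above is roughly the following SATB fast path. The struct is a toy stand-in for the thread-local PtrQueue fields (active/index/buf) addressed via the offsets computed earlier, and the slow path stands in for the SharedRuntime::g1_wb_pre leaf call:

    #include <cstddef>
    #include <cstdint>

    struct SatbQueue {       // stand-in for the JavaThread's SATB mark queue
      bool   active;         // the "marking" flag (or Shenandoah gc-state bits)
      size_t index;          // byte index into buf; 0 means the buffer is full
      void** buf;
    };

    static void g1_wb_pre_slow(void* /*pre_val*/, void* /*thread*/) { /* runtime call */ }

    inline void satb_pre_barrier(SatbQueue* q, void* pre_val, void* thread) {
      if (!q->active)       return;           // if (!marking): nothing to log
      if (pre_val == NULL)  return;           // if (pre_val != NULL)
      if (q->index != 0) {                    // queue not full: fast path
        size_t next_index = q->index - sizeof(intptr_t);
        *(void**)((char*)q->buf + next_index) = pre_val;   // log previous value
        q->index = next_index;                             // update the index
      } else {
        g1_wb_pre_slow(pre_val, thread);      // buffer full: call the runtime
      }
    }
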
4005 
4006 //
4007 // Update the card table and add card address to the queue
4008 //
4009 void GraphKit::g1_mark_card(IdealKit& ideal,
4010                             Node* card_adr,
4011                             Node* oop_store,
4012                             uint oop_alias_idx,
4013                             Node* index,
4014                             Node* index_adr,
4015                             Node* buffer,
4016                             const TypeFunc* tf) {
4017 
4018   Node* zero  = __ ConI(0);
4019   Node* zeroX = __ ConX(0);
4020   Node* no_base = __ top();
4021   BasicType card_bt = T_BYTE;
4022   // Smash zero into card. MUST BE ORDERED WRT TO STORE
4023   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);


4175     int count_field_idx = C->get_alias_index(count_field_type);
4176     return make_load(ctrl,
4177                      basic_plus_adr(str, str, count_offset),
4178                      TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
4179   } else {
4180     return load_array_length(load_String_value(ctrl, str));
4181   }
4182 }
4183 
4184 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
4185   int value_offset = java_lang_String::value_offset_in_bytes();
4186   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4187                                                      false, NULL, 0);
4188   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4189   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
4190                                                    TypeAry::make(TypeInt::CHAR,TypeInt::POS),
4191                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
4192   int value_field_idx = C->get_alias_index(value_field_type);
4193   Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
4194                          value_type, T_OBJECT, value_field_idx, MemNode::unordered);
4195 #if INCLUDE_ALL_GCS
4196   if (UseShenandoahGC) {
4197     load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);
4198   }
4199 #endif
4200   // String.value field is known to be @Stable.
4201   if (UseImplicitStableValues) {
4202     load = cast_array_to_stable(load, value_type);
4203   }
4204   return load;
4205 }
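
These helpers target the pre-JDK-9 String layout; the offsets come from java_lang_String::*_offset_in_bytes(), and whether the offset/count fields exist depends on the JDK 8 build (load_String_length above falls back to value.length when count is absent). The Shenandoah-only addition routes the loaded value array through the load-reference barrier before it is used. Layout sketch for orientation:

    // class java.lang.String {          // pre-JDK-9 layout (sketch)
    //     char[] value;    // characters             -> load_String_value()
    //     int    offset;   // first used char index  -> store_String_offset()
    //     int    count;    // length, if present     -> load_String_length()
    // }
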
4206 
4207 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
4208   int offset_offset = java_lang_String::offset_offset_in_bytes();
4209   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4210                                                      false, NULL, 0);
4211   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4212   int offset_field_idx = C->get_alias_index(offset_field_type);
4213   store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
4214                   value, T_INT, offset_field_idx, MemNode::unordered);
4215 }
4216 
4217 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4218   int value_offset = java_lang_String::value_offset_in_bytes();
4219   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),

