10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
28 #include "gc_implementation/g1/heapRegion.hpp"
29 #include "gc_interface/collectedHeap.hpp"
30 #include "memory/barrierSet.hpp"
31 #include "memory/cardTableModRefBS.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/graphKit.hpp"
34 #include "opto/idealKit.hpp"
35 #include "opto/locknode.hpp"
36 #include "opto/machnode.hpp"
37 #include "opto/parse.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/sharedRuntime.hpp"
42
43 //----------------------------GraphKit-----------------------------------------
44 // Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn())
{
  // Detach any pending exception list from the incoming map and hold it
  // locally, so this kit starts from a map with no exceptions attached.
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL) jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
}
54
55 // Private constructor for parser.
56 GraphKit::GraphKit()
57 : Phase(Phase::Parser),
58 _env(C->env()),
59 _gvn(*C->initial_gvn())
60 {
61 _exceptions = NULL;
62 set_map(NULL);
1518 }
1519
1520
// Emit the GC pre-write barrier appropriate for the active barrier set.
//
// do_load  - if true the barrier loads the previous value from (obj, adr)
//            itself; otherwise the caller supplies it in pre_val.
// ctl      - control input, installed via set_control() before emission.
// adr_idx  - alias index of the address being written.
// val/val_type - new value being stored and its oop type.
// bt       - basic type of the field/element being written.
void GraphKit::pre_barrier(bool do_load,
                           Node* ctl,
                           Node* obj,
                           Node* adr,
                           uint adr_idx,
                           Node* val,
                           const TypeOopPtr* val_type,
                           Node* pre_val,
                           BasicType bt) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      // G1 SATB collectors must enqueue the previous value.
      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      break;

    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      // Card-marking collectors have no pre-barrier.
      break;

    case BarrierSet::Other:
    default :
      ShouldNotReachHere();

  }
}
1550
1551 bool GraphKit::can_move_pre_barrier() const {
1552 BarrierSet* bs = Universe::heap()->barrier_set();
1553 switch (bs->kind()) {
1554 case BarrierSet::G1SATBCT:
1555 case BarrierSet::G1SATBCTLogging:
1556 return true; // Can move it if no safepoint
1557
1558 case BarrierSet::CardTableModRef:
1559 case BarrierSet::CardTableExtension:
1560 case BarrierSet::ModRef:
1561 return true; // There is no pre-barrier
1562
1563 case BarrierSet::Other:
1564 default :
1565 ShouldNotReachHere();
1566 }
1567 return false;
1568 }
1569
// Emit the GC post-write barrier appropriate for the active barrier set.
//
// store       - the store node the barrier guards.
// obj/adr     - base object and the exact address that was written.
// adr_idx     - alias index of the written memory.
// use_precise - passed through to the card-marking barriers; selects
//               precise (exact address) vs. imprecise card marking.
void GraphKit::post_barrier(Node* ctl,
                            Node* store,
                            Node* obj,
                            Node* adr,
                            uint adr_idx,
                            Node* val,
                            BasicType bt,
                            bool use_precise) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      // G1 logs the updated card in the dirty-card queue.
      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
      break;

    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // Classic card table: dirty the card covering the store.
      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
      break;

    case BarrierSet::ModRef:
      // No post-barrier needed.
      break;

    case BarrierSet::Other:
    default :
      ShouldNotReachHere();

  }
}
1600
1601 Node* GraphKit::store_oop(Node* ctl,
1602 Node* obj,
1603 Node* adr,
1604 const TypePtr* adr_type,
1605 Node* val,
1690 // number. (The prior range check has ensured this.)
1691 // This assertion is used by ConvI2LNode::Ideal.
1692 int index_max = max_jint - 1; // array size is max_jint, index is one less
1693 if (sizetype != NULL) index_max = sizetype->_hi - 1;
1694 const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
1695 idx = C->constrained_convI2L(&_gvn, idx, iidxtype, ctrl);
1696 #endif
1697 Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1698 return basic_plus_adr(ary, base, scale);
1699 }
1700
1701 //-------------------------load_array_element-------------------------
1702 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1703 const Type* elemtype = arytype->elem();
1704 BasicType elembt = elemtype->array_element_basic_type();
1705 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1706 if (elembt == T_NARROWOOP) {
1707 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1708 }
1709 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1710 return ld;
1711 }
1712
1713 //-------------------------set_arguments_for_java_call-------------------------
1714 // Arguments (pre-popped from the stack) are taken from the JVMS.
1715 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1716 // Add the call arguments:
1717 uint nargs = call->method()->arg_size();
1718 for (uint i = 0; i < nargs; i++) {
1719 Node* arg = argument(i);
1720 call->init_req(i + TypeFunc::Parms, arg);
1721 }
1722 }
1723
1724 //---------------------------set_edges_for_java_call---------------------------
1725 // Connect a newly created call into the current JVMS.
1726 // A return value node (if any) is returned from set_edges_for_java_call.
1727 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1728
1729 // Add the predefined inputs:
3648 Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3649 if (ccast != length) {
3650 _gvn.set_type_bottom(ccast);
3651 record_for_igvn(ccast);
3652 replace_in_map(length, ccast);
3653 }
3654 }
3655
3656 return javaoop;
3657 }
3658
3659 // The following "Ideal_foo" functions are placed here because they recognize
3660 // the graph shapes created by the functions immediately above.
3661
3662 //---------------------------Ideal_allocation----------------------------------
3663 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3664 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3665 if (ptr == NULL) { // reduce dumb test in callers
3666 return NULL;
3667 }
3668 if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3669 ptr = ptr->in(1);
3670 if (ptr == NULL) return NULL;
3671 }
3672 // Return NULL for allocations with several casts:
3673 // j.l.reflect.Array.newInstance(jobject, jint)
3674 // Object.clone()
3675 // to keep more precise type from last cast.
3676 if (ptr->is_Proj()) {
3677 Node* allo = ptr->in(0);
3678 if (allo != NULL && allo->is_Allocate()) {
3679 return allo->as_Allocate();
3680 }
3681 }
3682 // Report failure to match.
3683 return NULL;
3684 }
3685
3686 // Fancy version which also strips off an offset (and reports it to caller).
3687 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
3831
3832 // Get the alias_index for raw card-mark memory
3833 int adr_type = Compile::AliasIdxRaw;
3834 Node* zero = __ ConI(0); // Dirty card value
3835 BasicType bt = T_BYTE;
3836
3837 if (UseCondCardMark) {
3838 // The classic GC reference write barrier is typically implemented
3839 // as a store into the global card mark table. Unfortunately
3840 // unconditional stores can result in false sharing and excessive
3841 // coherence traffic as well as false transactional aborts.
3842 // UseCondCardMark enables MP "polite" conditional card mark
3843 // stores. In theory we could relax the load from ctrl() to
3844 // no_ctrl, but that doesn't buy much latitude.
3845 Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3846 __ if_then(card_val, BoolTest::ne, zero);
3847 }
3848
3849 // Smash zero into card
3850 if( !UseConcMarkSweepGC ) {
3851 __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
3852 } else {
3853 // Specialized path for CM store barrier
3854 __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3855 }
3856
3857 if (UseCondCardMark) {
3858 __ end_if();
3859 }
3860
3861 // Final sync IdealKit and GraphKit.
3862 final_sync(ideal);
3863 }
3864
3865 // G1 pre/post barriers
3866 void GraphKit::g1_write_barrier_pre(bool do_load,
3867 Node* obj,
3868 Node* adr,
3869 uint alias_idx,
3870 Node* val,
3871 const TypeOopPtr* val_type,
3902 float likely = PROB_LIKELY(0.999);
3903 float unlikely = PROB_UNLIKELY(0.999);
3904
3905 BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3906 assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3907
3908 // Offsets into the thread
3909 const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 648
3910 PtrQueue::byte_offset_of_active());
3911 const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 656
3912 PtrQueue::byte_offset_of_index());
3913 const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652
3914 PtrQueue::byte_offset_of_buf());
3915
3916 // Now the actual pointers into the thread
3917 Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
3918 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
3919 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
3920
3921 // Now some of the values
3922 Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
3923
3924 // if (!marking)
3925 __ if_then(marking, BoolTest::ne, zero, unlikely); {
3926 BasicType index_bt = TypeX_X->basic_type();
3927 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
3928 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
3929
3930 if (do_load) {
3931 // load original value
3932 // alias_idx correct??
3933 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
3934 }
3935
3936 // if (pre_val != NULL)
3937 __ if_then(pre_val, BoolTest::ne, null()); {
3938 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3939
3940 // is the queue for this thread full?
3941 __ if_then(index, BoolTest::ne, zeroX, likely); {
3942
3943 // decrement the index
3944 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3945
3946 // Now get the buffer location we will log the previous value into and store it
3947 Node *log_addr = __ AddP(no_base, buffer, next_index);
3948 __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
3949 // update the index
3950 __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
3951
3952 } __ else_(); {
3953
3954 // logging buffer is full, call the runtime
3955 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3956 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
3957 } __ end_if(); // (!index)
3958 } __ end_if(); // (pre_val != NULL)
3959 } __ end_if(); // (!marking)
3960
3961 // Final sync IdealKit and GraphKit.
3962 final_sync(ideal);
3963 }
3964
3965 //
3966 // Update the card table and add card address to the queue
3967 //
3968 void GraphKit::g1_mark_card(IdealKit& ideal,
3969 Node* card_adr,
3970 Node* oop_store,
3971 uint oop_alias_idx,
3972 Node* index,
3973 Node* index_adr,
3974 Node* buffer,
3975 const TypeFunc* tf) {
3976
3977 Node* zero = __ ConI(0);
3978 Node* zeroX = __ ConX(0);
3979 Node* no_base = __ top();
3980 BasicType card_bt = T_BYTE;
3981 // Smash zero into card. MUST BE ORDERED WRT TO STORE
3982 __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
4134 int count_field_idx = C->get_alias_index(count_field_type);
4135 return make_load(ctrl,
4136 basic_plus_adr(str, str, count_offset),
4137 TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
4138 } else {
4139 return load_array_length(load_String_value(ctrl, str));
4140 }
4141 }
4142
// Load the char[] 'value' field of a java.lang.String, using the field's
// precise alias class so the load does not conflict with unrelated memory.
Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  // Result type: a non-null char[] of non-negative length.
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                  ciTypeArrayKlass::make(T_CHAR), true, 0);
  int value_field_idx = C->get_alias_index(value_field_type);
  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}
4160
// Store an int into the java.lang.String 'offset' field of 'str', using the
// field's precise alias class.
void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
  int offset_offset = java_lang_String::offset_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
  int offset_field_idx = C->get_alias_index(offset_field_type);
  store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
                  value, T_INT, offset_field_idx, MemNode::unordered);
}
4170
4171 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4172 int value_offset = java_lang_String::value_offset_in_bytes();
4173 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
28 #include "gc_implementation/g1/heapRegion.hpp"
29 #include "gc_interface/collectedHeap.hpp"
30 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
31 #include "memory/barrierSet.hpp"
32 #include "memory/cardTableModRefBS.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/graphKit.hpp"
35 #include "opto/idealKit.hpp"
36 #include "opto/locknode.hpp"
37 #include "opto/machnode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/runtime.hpp"
41 #include "runtime/deoptimization.hpp"
42 #include "runtime/sharedRuntime.hpp"
43
44 #if INCLUDE_ALL_GCS
45 #include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
46 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
47 #endif
48
49 //----------------------------GraphKit-----------------------------------------
50 // Main utility constructor.
51 GraphKit::GraphKit(JVMState* jvms)
52 : Phase(Phase::Parser),
53 _env(C->env()),
54 _gvn(*C->initial_gvn())
55 {
56 _exceptions = jvms->map()->next_exception();
57 if (_exceptions != NULL) jvms->map()->set_next_exception(NULL);
58 set_jvms(jvms);
59 }
60
61 // Private constructor for parser.
62 GraphKit::GraphKit()
63 : Phase(Phase::Parser),
64 _env(C->env()),
65 _gvn(*C->initial_gvn())
66 {
67 _exceptions = NULL;
68 set_map(NULL);
1524 }
1525
1526
// Emit the GC pre-write barrier appropriate for the active barrier set.
//
// do_load  - if true the barrier loads the previous value from (obj, adr)
//            itself; otherwise the caller supplies it in pre_val.
// ctl      - control input, installed via set_control() before emission.
// adr_idx  - alias index of the address being written.
// val/val_type - new value being stored and its oop type.
// bt       - basic type of the field/element being written.
void GraphKit::pre_barrier(bool do_load,
                           Node* ctl,
                           Node* obj,
                           Node* adr,
                           uint adr_idx,
                           Node* val,
                           const TypeOopPtr* val_type,
                           Node* pre_val,
                           BasicType bt) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      // G1 SATB collectors must enqueue the previous value.
      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      break;
    case BarrierSet::ShenandoahBarrierSet:
      // Shenandoah reuses the SATB enqueue path, but only when its SATB
      // barrier is enabled.
      if (ShenandoahSATBBarrier) {
        g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      }
      break;
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      // Card-marking collectors have no pre-barrier.
      break;

    case BarrierSet::Other:
    default :
      ShouldNotReachHere();

  }
}
1560
1561 bool GraphKit::can_move_pre_barrier() const {
1562 BarrierSet* bs = Universe::heap()->barrier_set();
1563 switch (bs->kind()) {
1564 case BarrierSet::G1SATBCT:
1565 case BarrierSet::G1SATBCTLogging:
1566 case BarrierSet::ShenandoahBarrierSet:
1567 return true; // Can move it if no safepoint
1568
1569 case BarrierSet::CardTableModRef:
1570 case BarrierSet::CardTableExtension:
1571 case BarrierSet::ModRef:
1572 return true; // There is no pre-barrier
1573
1574 case BarrierSet::Other:
1575 default :
1576 ShouldNotReachHere();
1577 }
1578 return false;
1579 }
1580
// Emit the GC post-write barrier appropriate for the active barrier set.
//
// store       - the store node the barrier guards.
// obj/adr     - base object and the exact address that was written.
// adr_idx     - alias index of the written memory.
// use_precise - passed through to the card-marking barriers; selects
//               precise (exact address) vs. imprecise card marking.
void GraphKit::post_barrier(Node* ctl,
                            Node* store,
                            Node* obj,
                            Node* adr,
                            uint adr_idx,
                            Node* val,
                            BasicType bt,
                            bool use_precise) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      // G1 logs the updated card in the dirty-card queue.
      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
      break;
    case BarrierSet::ShenandoahBarrierSet:
      // Shenandoah has no card-marking post-barrier here; when the
      // store-val enqueue barrier is enabled it records the NEW value via
      // the SATB enqueue path (no load, no address: do_load=false,
      // obj/adr NULL, alias max_juint, val passed as pre_val).
      if (ShenandoahStoreValEnqueueBarrier) {
        g1_write_barrier_pre(false, NULL, NULL, max_juint, NULL, NULL, val, bt);
      }
      break;
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // Classic card table: dirty the card covering the store.
      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
      break;

    case BarrierSet::ModRef:
      // No post-barrier needed.
      break;

    case BarrierSet::Other:
    default :
      ShouldNotReachHere();

  }
}
1615
1616 Node* GraphKit::store_oop(Node* ctl,
1617 Node* obj,
1618 Node* adr,
1619 const TypePtr* adr_type,
1620 Node* val,
1705 // number. (The prior range check has ensured this.)
1706 // This assertion is used by ConvI2LNode::Ideal.
1707 int index_max = max_jint - 1; // array size is max_jint, index is one less
1708 if (sizetype != NULL) index_max = sizetype->_hi - 1;
1709 const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
1710 idx = C->constrained_convI2L(&_gvn, idx, iidxtype, ctrl);
1711 #endif
1712 Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1713 return basic_plus_adr(ary, base, scale);
1714 }
1715
1716 //-------------------------load_array_element-------------------------
// Load one element of array 'ary' at index 'idx', using the array type to
// derive the element type and address. Loaded oops are routed through the
// Shenandoah load-reference barrier when that collector is in use.
Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
  const Type* elemtype = arytype->elem();
  BasicType elembt = elemtype->array_element_basic_type();
  Node* adr = array_element_address(ary, idx, elembt, arytype->size());
  if (elembt == T_NARROWOOP) {
    elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
  }
  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
#if INCLUDE_ALL_GCS
  // Shenandoah requires a barrier on reference loads.
  if (UseShenandoahGC && (elembt == T_OBJECT || elembt == T_ARRAY)) {
    ld = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, ld);
  }
#endif
  return ld;
}
1732
1733 //-------------------------set_arguments_for_java_call-------------------------
1734 // Arguments (pre-popped from the stack) are taken from the JVMS.
1735 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1736 // Add the call arguments:
1737 uint nargs = call->method()->arg_size();
1738 for (uint i = 0; i < nargs; i++) {
1739 Node* arg = argument(i);
1740 call->init_req(i + TypeFunc::Parms, arg);
1741 }
1742 }
1743
1744 //---------------------------set_edges_for_java_call---------------------------
1745 // Connect a newly created call into the current JVMS.
1746 // A return value node (if any) is returned from set_edges_for_java_call.
1747 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1748
1749 // Add the predefined inputs:
3668 Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3669 if (ccast != length) {
3670 _gvn.set_type_bottom(ccast);
3671 record_for_igvn(ccast);
3672 replace_in_map(length, ccast);
3673 }
3674 }
3675
3676 return javaoop;
3677 }
3678
3679 // The following "Ideal_foo" functions are placed here because they recognize
3680 // the graph shapes created by the functions immediately above.
3681
3682 //---------------------------Ideal_allocation----------------------------------
3683 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
// Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
// Returns the AllocateNode, or NULL if the shape does not match.
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
  if (ptr == NULL) { // reduce dumb test in callers
    return NULL;
  }

#if INCLUDE_ALL_GCS
  // Shenandoah may interpose barrier nodes between the use and the
  // allocation; step over them so the allocation is still recognized.
  if (UseShenandoahGC) {
    ptr = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(ptr);
  }
#endif
  if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
    ptr = ptr->in(1);
    if (ptr == NULL) return NULL;
  }
  // Return NULL for allocations with several casts:
  //   j.l.reflect.Array.newInstance(jobject, jint)
  //   Object.clone()
  // to keep more precise type from last cast.
  if (ptr->is_Proj()) {
    Node* allo = ptr->in(0);
    if (allo != NULL && allo->is_Allocate()) {
      return allo->as_Allocate();
    }
  }
  // Report failure to match.
  return NULL;
}
3711
3712 // Fancy version which also strips off an offset (and reports it to caller).
3713 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
3857
3858 // Get the alias_index for raw card-mark memory
3859 int adr_type = Compile::AliasIdxRaw;
3860 Node* zero = __ ConI(0); // Dirty card value
3861 BasicType bt = T_BYTE;
3862
3863 if (UseCondCardMark) {
3864 // The classic GC reference write barrier is typically implemented
3865 // as a store into the global card mark table. Unfortunately
3866 // unconditional stores can result in false sharing and excessive
3867 // coherence traffic as well as false transactional aborts.
3868 // UseCondCardMark enables MP "polite" conditional card mark
3869 // stores. In theory we could relax the load from ctrl() to
3870 // no_ctrl, but that doesn't buy much latitude.
3871 Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3872 __ if_then(card_val, BoolTest::ne, zero);
3873 }
3874
3875 // Smash zero into card
3876 if( !UseConcMarkSweepGC ) {
3877 __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
3878 } else {
3879 // Specialized path for CM store barrier
3880 __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3881 }
3882
3883 if (UseCondCardMark) {
3884 __ end_if();
3885 }
3886
3887 // Final sync IdealKit and GraphKit.
3888 final_sync(ideal);
3889 }
3890
3891 // G1 pre/post barriers
3892 void GraphKit::g1_write_barrier_pre(bool do_load,
3893 Node* obj,
3894 Node* adr,
3895 uint alias_idx,
3896 Node* val,
3897 const TypeOopPtr* val_type,
3928 float likely = PROB_LIKELY(0.999);
3929 float unlikely = PROB_UNLIKELY(0.999);
3930
3931 BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3932 assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3933
3934 // Offsets into the thread
3935 const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 648
3936 PtrQueue::byte_offset_of_active());
3937 const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 656
3938 PtrQueue::byte_offset_of_index());
3939 const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652
3940 PtrQueue::byte_offset_of_buf());
3941
3942 // Now the actual pointers into the thread
3943 Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
3944 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
3945 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
3946
3947 // Now some of the values
3948 Node* marking;
3949 if (UseShenandoahGC) {
3950 Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(JavaThread::gc_state_offset())));
3951 Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
3952 marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
3953 assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");
3954 } else {
3955 assert(UseG1GC, "should be");
3956 marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
3957 }
3958
3959 // if (!marking)
3960 __ if_then(marking, BoolTest::ne, zero, unlikely); {
3961 BasicType index_bt = TypeX_X->basic_type();
3962 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
3963 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
3964
3965 if (do_load) {
3966 // load original value
3967 // alias_idx correct??
3968 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
3969 }
3970
3971 // if (pre_val != NULL)
3972 __ if_then(pre_val, BoolTest::ne, null()); {
3973 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3974
3975 // is the queue for this thread full?
3976 __ if_then(index, BoolTest::ne, zeroX, likely); {
3977
3978 // decrement the index
3979 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3980
3981 // Now get the buffer location we will log the previous value into and store it
3982 Node *log_addr = __ AddP(no_base, buffer, next_index);
3983 __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
3984 // update the index
3985 __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
3986
3987 } __ else_(); {
3988
3989 // logging buffer is full, call the runtime
3990 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3991 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
3992 } __ end_if(); // (!index)
3993 } __ end_if(); // (pre_val != NULL)
3994 } __ end_if(); // (!marking)
3995
3996 // Final sync IdealKit and GraphKit.
3997 final_sync(ideal);
3998
3999 #if INCLUDE_ALL_GCS
4000 if (UseShenandoahGC && adr != NULL) {
4001 Node* c = control();
4002 Node* call = c->in(1)->in(1)->in(1)->in(0);
4003 assert(call->is_g1_wb_pre_call(), "g1_wb_pre call expected");
4004 call->add_req(adr);
4005 }
4006 #endif
4007 }
4008
4009 //
4010 // Update the card table and add card address to the queue
4011 //
4012 void GraphKit::g1_mark_card(IdealKit& ideal,
4013 Node* card_adr,
4014 Node* oop_store,
4015 uint oop_alias_idx,
4016 Node* index,
4017 Node* index_adr,
4018 Node* buffer,
4019 const TypeFunc* tf) {
4020
4021 Node* zero = __ ConI(0);
4022 Node* zeroX = __ ConX(0);
4023 Node* no_base = __ top();
4024 BasicType card_bt = T_BYTE;
4025 // Smash zero into card. MUST BE ORDERED WRT TO STORE
4026 __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
4178 int count_field_idx = C->get_alias_index(count_field_type);
4179 return make_load(ctrl,
4180 basic_plus_adr(str, str, count_offset),
4181 TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
4182 } else {
4183 return load_array_length(load_String_value(ctrl, str));
4184 }
4185 }
4186
// Load the char[] 'value' field of a java.lang.String, using the field's
// precise alias class. Under Shenandoah the loaded reference goes through
// the load-reference barrier.
Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  // Result type: a non-null char[] of non-negative length.
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                  ciTypeArrayKlass::make(T_CHAR), true, 0);
  int value_field_idx = C->get_alias_index(value_field_type);
  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);
  }
#endif
  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}
4209
4210 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
4211 int offset_offset = java_lang_String::offset_offset_in_bytes();
4212 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4213 false, NULL, 0);
4214 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4215 int offset_field_idx = C->get_alias_index(offset_field_type);
4216 store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
4217 value, T_INT, offset_field_idx, MemNode::unordered);
4218 }
4219
4220 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4221 int value_offset = java_lang_String::value_offset_in_bytes();
4222 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
|