20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "ci/bcEscapeAnalyzer.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "opto/callGenerator.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/escape.hpp"
32 #include "opto/locknode.hpp"
33 #include "opto/machnode.hpp"
34 #include "opto/matcher.hpp"
35 #include "opto/parse.hpp"
36 #include "opto/regalloc.hpp"
37 #include "opto/regmask.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40
41 // Portions of code courtesy of Clifford Click
42
43 // Optimization - Graph Style
44
45 //=============================================================================
// StartNode identity is its _domain (the function's parameter type tuple).
uint StartNode::size_of() const { return sizeof(*this); }
// Two StartNodes are equal iff they share the very same _domain type instance.
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
// Debug printing: emit " #" followed by a dump of the domain type.
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif
54
55 //------------------------------Ideal------------------------------------------
56 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
57 return remove_dead_region(phase, can_reshape) ? this : NULL;
58 }
59
790 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
791 Node *use = p->fast_out(i);
792 if (use->is_CheckCastPP()) {
793 if (cast != NULL) {
794 return this; // more than 1 CheckCastPP
795 }
796 cast = use;
797 } else if (!use->is_Initialize() &&
798 !use->is_AddP()) {
799 // Expected uses are restricted to a CheckCastPP, an Initialize
800 // node, and AddP nodes. If we encounter any other use (a Phi
801 // node can be seen in rare cases) return this to prevent
802 // incorrect optimizations.
803 return this;
804 }
805 }
806 return cast;
807 }
808
809
810 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
811 projs->fallthrough_proj = NULL;
812 projs->fallthrough_catchproj = NULL;
813 projs->fallthrough_ioproj = NULL;
814 projs->catchall_ioproj = NULL;
815 projs->catchall_catchproj = NULL;
816 projs->fallthrough_memproj = NULL;
817 projs->catchall_memproj = NULL;
818 projs->resproj = NULL;
819 projs->exobj = NULL;
820
821 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
822 ProjNode *pn = fast_out(i)->as_Proj();
823 if (pn->outcnt() == 0) continue;
824 switch (pn->_con) {
825 case TypeFunc::Control:
826 {
827 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
828 projs->fallthrough_proj = pn;
829 DUIterator_Fast jmax, j = pn->fast_outs(jmax);
830 const Node *cn = pn->fast_out(j);
853 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
854 assert(projs->exobj == NULL, "only one");
855 projs->exobj = e;
856 }
857 }
858 break;
859 case TypeFunc::Memory:
860 if (pn->_is_io_use)
861 projs->catchall_memproj = pn;
862 else
863 projs->fallthrough_memproj = pn;
864 break;
865 case TypeFunc::Parms:
866 projs->resproj = pn;
867 break;
868 default:
869 assert(false, "unexpected projection from allocation node.");
870 }
871 }
872
// The resproj may not exist because the result could be ignored
874 // and the exception object may not exist if an exception handler
875 // swallows the exception but all the other must exist and be found.
876 assert(projs->fallthrough_proj != NULL, "must be found");
877 assert(Compile::current()->inlining_incrementally() || projs->fallthrough_catchproj != NULL, "must be found");
878 assert(Compile::current()->inlining_incrementally() || projs->fallthrough_memproj != NULL, "must be found");
879 assert(Compile::current()->inlining_incrementally() || projs->fallthrough_ioproj != NULL, "must be found");
880 assert(Compile::current()->inlining_incrementally() || projs->catchall_catchproj != NULL, "must be found");
881 if (separate_io_proj) {
882 assert(Compile::current()->inlining_incrementally() || projs->catchall_memproj != NULL, "must be found");
883 assert(Compile::current()->inlining_incrementally() || projs->catchall_ioproj != NULL, "must be found");
884 }
885 }
886
// If this is a not-yet-attempted MethodHandle late-inline call whose decisive
// argument has become a constant, queue it for late inlining; then perform
// the generic SafePointNode idealization.
Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this MH handle call becomes a candidate for inlining
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      // invokeBasic: the decisive argument is the first parameter; a constant
      // (ConP) there pins down the call target.
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL); // consumed -- do not attempt again
      }
    } else {
      // Other MH linkers carry a MemberName as the last argument.
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL); // consumed -- do not attempt again
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}
908
909
910 //=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
// Equality additionally requires the same target _method.
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
// Debug printing: short method name (when known) then generic call info.
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif
922
923 //=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
// Static calls add no identity of their own beyond CallJavaNode's.
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}
929
981
982 //=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
// Equality additionally requires identical runtime entry names.
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
// Debug printing: "# <entry name>" then generic call info.
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
995
996 //------------------------------calling_convention-----------------------------
// Runtime calls use the platform's native C calling convention.
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}
1000
1001 //=============================================================================
1002 //------------------------------calling_convention-----------------------------
1003
1004
1005 //=============================================================================
#ifndef PRODUCT
// Debug printing: "# <entry name>" then generic call info.
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
1013
1014 //=============================================================================
1015
// Set JVM-state local slot idx (relative to jvms->locoff()) to c. When the
// slot currently holds top, the previous slot may hold the first half of a
// long/double whose second half this store clobbers, so kill that half too.
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
1031
// SafePointNode occupies its full concrete size (used by Node cloning).
uint SafePointNode::size_of() const { return sizeof(*this); }
1033 uint SafePointNode::cmp( const Node &n ) const {
1505 } else {
1506 break;
1507 }
1508 } else {
1509 break; // found an interesting control
1510 }
1511 }
1512 return ctrl;
1513 }
1514 //
// Given a control, see if it's the control projection of an Unlock which is
// operating on the same object as lock.
1517 //
1518 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
1519 GrowableArray<AbstractLockNode*> &lock_ops) {
1520 ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
1521 if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
1522 Node *n = ctrl_proj->in(0);
1523 if (n != NULL && n->is_Unlock()) {
1524 UnlockNode *unlock = n->as_Unlock();
1525 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
1526 BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
1527 !unlock->is_eliminated()) {
1528 lock_ops.append(unlock);
1529 return true;
1530 }
1531 }
1532 }
1533 return false;
1534 }
1535
1536 //
1537 // Find the lock matching an unlock. Returns null if a safepoint
1538 // or complicated control is encountered first.
1539 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1540 LockNode *lock_result = NULL;
1541 // find the matching lock, or an intervening safepoint
1542 Node *ctrl = next_control(unlock->in(0));
1543 while (1) {
1544 assert(ctrl != NULL, "invalid control graph");
1545 assert(!ctrl->is_Start(), "missing lock for unlock");
1550 } else if (ctrl->is_Region()) {
1551 // Check for a simple diamond pattern. Punt on anything more complicated
1552 if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1553 Node *in1 = next_control(ctrl->in(1));
1554 Node *in2 = next_control(ctrl->in(2));
1555 if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1556 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1557 ctrl = next_control(in1->in(0)->in(0));
1558 } else {
1559 break;
1560 }
1561 } else {
1562 break;
1563 }
1564 } else {
1565 ctrl = next_control(ctrl->in(0)); // keep searching
1566 }
1567 }
1568 if (ctrl->is_Lock()) {
1569 LockNode *lock = ctrl->as_Lock();
1570 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
1571 BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
1572 lock_result = lock;
1573 }
1574 }
1575 return lock_result;
1576 }
1577
1578 // This code corresponds to case 3 above.
1579
// Case 3: a lock/unlock pair split across an if diamond. 'node' is one
// projection of the if; look for a matching unlock above the if and a second
// lock hanging off the opposite projection.
bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      // Fetch the projection opposite to 'node'.
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        // Accept only a not-yet-eliminated lock on the same object and slot.
        if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  // No match: discard any unlocks recorded along the way.
  lock_ops.trunc_to(0);
  return false;
}
1614
1615 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1616 GrowableArray<AbstractLockNode*> &lock_ops) {
1617 // check each control merging at this point for a matching unlock.
1618 // in(0) should be self edge so skip it.
1619 for (int i = 1; i < (int)region->req(); i++) {
1620 Node *in_node = next_control(region->in(i));
1621 if (in_node != NULL) {
1777 #endif
1778 return false; // External lock or it is not Box (Phi node).
1779 }
1780
1781 // Ignore complex cases: merged locks or multiple locks.
1782 Node* obj = obj_node();
1783 LockNode* unique_lock = NULL;
1784 if (!box->is_simple_lock_region(&unique_lock, obj)) {
1785 #ifdef ASSERT
1786 this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
1787 #endif
1788 return false;
1789 }
1790 if (unique_lock != this) {
1791 #ifdef ASSERT
1792 this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
1793 #endif
1794 return false;
1795 }
1796
1797 // Look for external lock for the same object.
1798 SafePointNode* sfn = this->as_SafePoint();
1799 JVMState* youngest_jvms = sfn->jvms();
1800 int max_depth = youngest_jvms->depth();
1801 for (int depth = 1; depth <= max_depth; depth++) {
1802 JVMState* jvms = youngest_jvms->of_depth(depth);
1803 int num_mon = jvms->nof_monitors();
1804 // Loop over monitors
1805 for (int idx = 0; idx < num_mon; idx++) {
1806 Node* obj_node = sfn->monitor_obj(jvms, idx);
1807 BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1808 if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1809 return true;
1810 }
1811 }
1812 }
1813 #ifdef ASSERT
1814 this->log_lock_optimization(c, "eliminate_lock_INLR_3");
1815 #endif
1816 return false;
1817 }
1818
1819 //=============================================================================
// UnlockNode occupies its full concrete size (used by Node cloning).
uint UnlockNode::size_of() const { return sizeof(*this); }
1821
1822 //=============================================================================
1823 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1824
1825 // perform any generic optimizations first (returns 'this' or NULL)
1826 Node *result = SafePointNode::Ideal(phase, can_reshape);
|
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "ci/bcEscapeAnalyzer.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "opto/callGenerator.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/escape.hpp"
32 #include "opto/locknode.hpp"
33 #include "opto/machnode.hpp"
34 #include "opto/matcher.hpp"
35 #include "opto/parse.hpp"
36 #include "opto/regalloc.hpp"
37 #include "opto/regmask.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40 #if INCLUDE_ALL_GCS
41 #include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
42 #endif
43
44 // Portions of code courtesy of Clifford Click
45
46 // Optimization - Graph Style
47
48 //=============================================================================
// StartNode identity is its _domain (the function's parameter type tuple).
uint StartNode::size_of() const { return sizeof(*this); }
// Two StartNodes are equal iff they share the very same _domain type instance.
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
// Debug printing: emit " #" followed by a dump of the domain type.
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif
57
58 //------------------------------Ideal------------------------------------------
59 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
60 return remove_dead_region(phase, can_reshape) ? this : NULL;
61 }
62
793 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
794 Node *use = p->fast_out(i);
795 if (use->is_CheckCastPP()) {
796 if (cast != NULL) {
797 return this; // more than 1 CheckCastPP
798 }
799 cast = use;
800 } else if (!use->is_Initialize() &&
801 !use->is_AddP()) {
802 // Expected uses are restricted to a CheckCastPP, an Initialize
803 // node, and AddP nodes. If we encounter any other use (a Phi
804 // node can be seen in rare cases) return this to prevent
805 // incorrect optimizations.
806 return this;
807 }
808 }
809 return cast;
810 }
811
812
813 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
814 projs->fallthrough_proj = NULL;
815 projs->fallthrough_catchproj = NULL;
816 projs->fallthrough_ioproj = NULL;
817 projs->catchall_ioproj = NULL;
818 projs->catchall_catchproj = NULL;
819 projs->fallthrough_memproj = NULL;
820 projs->catchall_memproj = NULL;
821 projs->resproj = NULL;
822 projs->exobj = NULL;
823
824 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
825 ProjNode *pn = fast_out(i)->as_Proj();
826 if (pn->outcnt() == 0) continue;
827 switch (pn->_con) {
828 case TypeFunc::Control:
829 {
830 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
831 projs->fallthrough_proj = pn;
832 DUIterator_Fast jmax, j = pn->fast_outs(jmax);
833 const Node *cn = pn->fast_out(j);
856 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
857 assert(projs->exobj == NULL, "only one");
858 projs->exobj = e;
859 }
860 }
861 break;
862 case TypeFunc::Memory:
863 if (pn->_is_io_use)
864 projs->catchall_memproj = pn;
865 else
866 projs->fallthrough_memproj = pn;
867 break;
868 case TypeFunc::Parms:
869 projs->resproj = pn;
870 break;
871 default:
872 assert(false, "unexpected projection from allocation node.");
873 }
874 }
875
876 // The resproj may not exist because the result could be ignored
877 // and the exception object may not exist if an exception handler
878 // swallows the exception but all the other must exist and be found.
879 assert(projs->fallthrough_proj != NULL, "must be found");
880 do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
881 assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
882 assert(!do_asserts || projs->fallthrough_memproj != NULL, "must be found");
883 assert(!do_asserts || projs->fallthrough_ioproj != NULL, "must be found");
884 assert(!do_asserts || projs->catchall_catchproj != NULL, "must be found");
885 if (separate_io_proj) {
886 assert(!do_asserts || projs->catchall_memproj != NULL, "must be found");
887 assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found");
888 }
889 }
890
// If this is a not-yet-attempted MethodHandle late-inline call whose decisive
// argument has become a constant, queue it for late inlining; then perform
// the generic SafePointNode idealization.
Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this MH handle call becomes a candidate for inlining
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      // invokeBasic: the decisive argument is the first parameter; a constant
      // (ConP) there pins down the call target.
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL); // consumed -- do not attempt again
      }
    } else {
      // Other MH linkers carry a MemberName as the last argument.
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL); // consumed -- do not attempt again
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}
912
913 //=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
// Equality additionally requires the same target _method.
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
// Debug printing: short method name (when known) then generic call info.
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif
925
926 //=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
// Static calls add no identity of their own beyond CallJavaNode's.
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}
932
984
985 //=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
// Equality additionally requires identical runtime entry names.
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
// Debug printing: "# <entry name>" then generic call info.
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
998
999 //------------------------------calling_convention-----------------------------
// Runtime calls use the platform's native C calling convention.
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}
1003
1004 bool CallRuntimeNode::is_call_to_arraycopystub() const {
1005 if (_name != NULL && strstr(_name, "arraycopy") != 0) {
1006 return true;
1007 }
1008 return false;
1009 }
1010
1011 //=============================================================================
1012 //------------------------------calling_convention-----------------------------
1013
1014
1015 //=============================================================================
1016 #ifndef PRODUCT
// Debug printing: "# <entry name>" then generic call info.
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
1022 #endif
1023
// For a g1_wb_pre call under Shenandoah, drop a trailing AddP argument whose
// only uses are g1_wb_pre calls, and push the AddP back on the IGVN worklist.
Node *CallLeafNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (UseShenandoahGC && is_g1_wb_pre_call()) {
    // cnt is the arity of the g1_wb_pre signature; anything beyond it is extra.
    uint cnt = OptoRuntime::g1_wb_pre_Type()->domain()->cnt();
    if (req() > cnt) {
      Node* addp = in(cnt);
      if (has_only_g1_wb_pre_uses(addp)) {
        // NOTE(review): has_only_g1_wb_pre_uses() returns false whenever
        // UseShenandoahGC is set, so this branch appears unreachable under
        // the guard above -- confirm which condition is intended.
        del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return this;
      }
    }
  }

  return CallNode::Ideal(phase, can_reshape);
}
1041
// True if n has at least one use and every use is a g1_wb_pre runtime call.
// Always false under Shenandoah.
bool CallLeafNode::has_only_g1_wb_pre_uses(Node* n) {
  if (UseShenandoahGC) {
    // NOTE(review): the only visible caller (CallLeafNode::Ideal) invokes
    // this under a UseShenandoahGC guard, which this early-out defeats --
    // verify the intent of this check.
    return false;
  }
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_g1_wb_pre_call()) {
      return false;
    }
  }
  // Require at least one use; a dead node does not qualify.
  return n->outcnt() > 0;
}
1054
1055 //=============================================================================
1056
// Set JVM-state local slot idx (relative to jvms->locoff()) to c. When the
// slot currently holds top, the previous slot may hold the first half of a
// long/double whose second half this store clobbers, so kill that half too.
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
1072
// SafePointNode occupies its full concrete size (used by Node cloning).
uint SafePointNode::size_of() const { return sizeof(*this); }
1074 uint SafePointNode::cmp( const Node &n ) const {
1546 } else {
1547 break;
1548 }
1549 } else {
1550 break; // found an interesting control
1551 }
1552 }
1553 return ctrl;
1554 }
1555 //
// Given a control, see if it's the control projection of an Unlock which is
// operating on the same object as lock.
1558 //
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  // Must be the control projection of an Unlock node.
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      Node* lock_obj = lock->obj_node();
      Node* unlock_obj = unlock->obj_node();
#if INCLUDE_ALL_GCS
      if (UseShenandoahGC) {
        // Compare the underlying objects, not their GC barrier wrappers.
        lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj);
        unlock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(unlock_obj);
      }
#endif
      // Record the unlock if it is live and works on the same object/slot.
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}
1584
1585 //
1586 // Find the lock matching an unlock. Returns null if a safepoint
1587 // or complicated control is encountered first.
1588 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1589 LockNode *lock_result = NULL;
1590 // find the matching lock, or an intervening safepoint
1591 Node *ctrl = next_control(unlock->in(0));
1592 while (1) {
1593 assert(ctrl != NULL, "invalid control graph");
1594 assert(!ctrl->is_Start(), "missing lock for unlock");
1599 } else if (ctrl->is_Region()) {
1600 // Check for a simple diamond pattern. Punt on anything more complicated
1601 if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1602 Node *in1 = next_control(ctrl->in(1));
1603 Node *in2 = next_control(ctrl->in(2));
1604 if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1605 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1606 ctrl = next_control(in1->in(0)->in(0));
1607 } else {
1608 break;
1609 }
1610 } else {
1611 break;
1612 }
1613 } else {
1614 ctrl = next_control(ctrl->in(0)); // keep searching
1615 }
1616 }
1617 if (ctrl->is_Lock()) {
1618 LockNode *lock = ctrl->as_Lock();
1619 Node* lock_obj = lock->obj_node();
1620 Node* unlock_obj = unlock->obj_node();
1621 #if INCLUDE_ALL_GCS
1622 if (UseShenandoahGC) {
1623 lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj);
1624 unlock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(unlock_obj);
1625 }
1626 #endif
1627 if (lock_obj->eqv_uncast(unlock_obj) &&
1628 BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
1629 lock_result = lock;
1630 }
1631 }
1632 return lock_result;
1633 }
1634
1635 // This code corresponds to case 3 above.
1636
// Case 3: a lock/unlock pair split across an if diamond. 'node' is one
// projection of the if; look for a matching unlock above the if and a second
// lock hanging off the opposite projection.
bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      // Fetch the projection opposite to 'node'.
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        Node* lock_obj = lock->obj_node();
        Node* lock1_obj = lock1->obj_node();
#if INCLUDE_ALL_GCS
        if (UseShenandoahGC) {
          // Compare the underlying objects, not their GC barrier wrappers.
          lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj);
          lock1_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock1_obj);
        }
#endif
        // Accept only a not-yet-eliminated lock on the same object and slot.
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  // No match: discard any unlocks recorded along the way.
  lock_ops.trunc_to(0);
  return false;
}
1679
1680 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1681 GrowableArray<AbstractLockNode*> &lock_ops) {
1682 // check each control merging at this point for a matching unlock.
1683 // in(0) should be self edge so skip it.
1684 for (int i = 1; i < (int)region->req(); i++) {
1685 Node *in_node = next_control(region->in(i));
1686 if (in_node != NULL) {
1842 #endif
1843 return false; // External lock or it is not Box (Phi node).
1844 }
1845
1846 // Ignore complex cases: merged locks or multiple locks.
1847 Node* obj = obj_node();
1848 LockNode* unique_lock = NULL;
1849 if (!box->is_simple_lock_region(&unique_lock, obj)) {
1850 #ifdef ASSERT
1851 this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
1852 #endif
1853 return false;
1854 }
1855 if (unique_lock != this) {
1856 #ifdef ASSERT
1857 this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
1858 #endif
1859 return false;
1860 }
1861
1862 #if INCLUDE_ALL_GCS
1863 if (UseShenandoahGC) {
1864 obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(obj);
1865 }
1866 #endif
1867 // Look for external lock for the same object.
1868 SafePointNode* sfn = this->as_SafePoint();
1869 JVMState* youngest_jvms = sfn->jvms();
1870 int max_depth = youngest_jvms->depth();
1871 for (int depth = 1; depth <= max_depth; depth++) {
1872 JVMState* jvms = youngest_jvms->of_depth(depth);
1873 int num_mon = jvms->nof_monitors();
1874 // Loop over monitors
1875 for (int idx = 0; idx < num_mon; idx++) {
1876 Node* obj_node = sfn->monitor_obj(jvms, idx);
1877 #if INCLUDE_ALL_GCS
1878 if (UseShenandoahGC) {
1879 obj_node = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(obj_node);
1880 }
1881 #endif
1882 BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1883 if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1884 return true;
1885 }
1886 }
1887 }
1888 #ifdef ASSERT
1889 this->log_lock_optimization(c, "eliminate_lock_INLR_3");
1890 #endif
1891 return false;
1892 }
1893
1894 //=============================================================================
// UnlockNode occupies its full concrete size (used by Node cloning).
uint UnlockNode::size_of() const { return sizeof(*this); }
1896
1897 //=============================================================================
1898 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1899
1900 // perform any generic optimizations first (returns 'this' or NULL)
1901 Node *result = SafePointNode::Ideal(phase, can_reshape);
|