< prev index next >

src/share/vm/opto/callnode.cpp

Print this page




  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "opto/callGenerator.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/escape.hpp"
  32 #include "opto/locknode.hpp"
  33 #include "opto/machnode.hpp"
  34 #include "opto/matcher.hpp"
  35 #include "opto/parse.hpp"
  36 #include "opto/regalloc.hpp"
  37 #include "opto/regmask.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"



  40 
  41 // Portions of code courtesy of Clifford Click
  42 
  43 // Optimization - Graph Style
  44 
  45 //=============================================================================
// StartNode: entry point of the method's graph.  Its type is the fixed
// tuple type (_domain) describing the incoming parameters.
uint StartNode::size_of() const { return sizeof(*this); }
// Two StartNodes are equal iff they share the same parameter tuple type.
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif

//------------------------------Ideal------------------------------------------
// Standard dead-region cleanup.  Returns 'this' if progress was made,
// NULL otherwise (the Ideal-transform convention for "no change").
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}
  59 


 790   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 791     Node *use = p->fast_out(i);
 792     if (use->is_CheckCastPP()) {
 793       if (cast != NULL) {
 794         return this;  // more than 1 CheckCastPP
 795       }
 796       cast = use;
 797     } else if (!use->is_Initialize() &&
 798                !use->is_AddP()) {
 799       // Expected uses are restricted to a CheckCastPP, an Initialize
 800       // node, and AddP nodes. If we encounter any other use (a Phi
 801       // node can be seen in rare cases) return this to prevent
 802       // incorrect optimizations.
 803       return this;
 804     }
 805   }
 806   return cast;
 807 }
 808 
 809 
 810 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
 811   projs->fallthrough_proj      = NULL;
 812   projs->fallthrough_catchproj = NULL;
 813   projs->fallthrough_ioproj    = NULL;
 814   projs->catchall_ioproj       = NULL;
 815   projs->catchall_catchproj    = NULL;
 816   projs->fallthrough_memproj   = NULL;
 817   projs->catchall_memproj      = NULL;
 818   projs->resproj               = NULL;
 819   projs->exobj                 = NULL;
 820 
 821   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 822     ProjNode *pn = fast_out(i)->as_Proj();
 823     if (pn->outcnt() == 0) continue;
 824     switch (pn->_con) {
 825     case TypeFunc::Control:
 826       {
 827         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 828         projs->fallthrough_proj = pn;
 829         DUIterator_Fast jmax, j = pn->fast_outs(jmax);
 830         const Node *cn = pn->fast_out(j);


 853         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 854           assert(projs->exobj == NULL, "only one");
 855           projs->exobj = e;
 856         }
 857       }
 858       break;
 859     case TypeFunc::Memory:
 860       if (pn->_is_io_use)
 861         projs->catchall_memproj = pn;
 862       else
 863         projs->fallthrough_memproj = pn;
 864       break;
 865     case TypeFunc::Parms:
 866       projs->resproj = pn;
 867       break;
 868     default:
 869       assert(false, "unexpected projection from allocation node.");
 870     }
 871   }
 872 
 873   // The resproj may not exist because the result could be ignored
 874   // and the exception object may not exist if an exception handler
 875   // swallows the exception but all the other must exist and be found.
 876   assert(projs->fallthrough_proj      != NULL, "must be found");
 877   assert(Compile::current()->inlining_incrementally() || projs->fallthrough_catchproj != NULL, "must be found");
 878   assert(Compile::current()->inlining_incrementally() || projs->fallthrough_memproj   != NULL, "must be found");
 879   assert(Compile::current()->inlining_incrementally() || projs->fallthrough_ioproj    != NULL, "must be found");
 880   assert(Compile::current()->inlining_incrementally() || projs->catchall_catchproj    != NULL, "must be found");

 881   if (separate_io_proj) {
 882     assert(Compile::current()->inlining_incrementally() || projs->catchall_memproj    != NULL, "must be found");
 883     assert(Compile::current()->inlining_incrementally() || projs->catchall_ioproj     != NULL, "must be found");
 884   }
 885 }
 886 
// If this is a MethodHandle invocation whose deciding argument has become a
// constant, re-queue the call generator for incremental (late) inlining.
Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this MH handle call becomes a candidate for inlining
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      // invokeBasic: inline once the first argument (the method handle
      // receiver) is a constant oop (Op_ConP).
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);  // hand ownership of cg to the late-inline list
      }
    } else {
      // linkTo* intrinsics: the trailing argument is the MemberName; inline
      // once it is a constant oop.
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}
 908 
 909 
 910 //=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
// Java calls are equal only when the base call state matches and they target
// the same resolved method.
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
// Debug dump: short method name (if resolved) followed by base call info.
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif
 922 
 923 //=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
// Static Java calls add no identity state beyond CallJavaNode.
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}
 929 


 981 
 982 //=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
// Runtime calls are identified by their C entry point name, so compare the
// name by content (not pointer) in addition to the base call state.
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
// Debug dump: "# <entry-point-name>" followed by base call info.
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
// Runtime stubs take their arguments in the platform C calling convention.
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}
1000 







1001 //=============================================================================
1002 //------------------------------calling_convention-----------------------------
1003 
1004 
1005 //=============================================================================
#ifndef PRODUCT
// Debug dump: leaf calls print like runtime calls,
// "# <entry-point-name>" followed by base call info.
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
1013 































1014 //=============================================================================
1015 
// Store node 'c' into local slot 'idx' of the given JVM state, taking care
// not to leave a stale half of a two-slot (long/double) value behind.
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
1031 
uint SafePointNode::size_of() const { return sizeof(*this); }  // exact object size for cloning
1033 uint SafePointNode::cmp( const Node &n ) const {


1497       } else {
1498         break;
1499       }
1500     } else {
1501       break; // found an interesting control
1502     }
1503   }
1504   return ctrl;
1505 }
1506 //
1507 // Given a control, see if it's the control projection of an Unlock which
1508 // is operating on the same object as lock.
1509 //
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  // Only a Control projection can lead back to an Unlock node.
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      // Match criteria: same object (ignoring casts), same lock stack slot,
      // and the unlock has not already been eliminated.
      if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);  // record it as part of the eliminable set
        return true;
      }
    }
  }
  return false;
}
1527 
1528 //
1529 // Find the lock matching an unlock.  Returns null if a safepoint
1530 // or complicated control is encountered first.
1531 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1532   LockNode *lock_result = NULL;
1533   // find the matching lock, or an intervening safepoint
1534   Node *ctrl = next_control(unlock->in(0));
1535   while (1) {
1536     assert(ctrl != NULL, "invalid control graph");
1537     assert(!ctrl->is_Start(), "missing lock for unlock");


1542     } else if (ctrl->is_Region()) {
1543       // Check for a simple diamond pattern.  Punt on anything more complicated
1544       if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1545         Node *in1 = next_control(ctrl->in(1));
1546         Node *in2 = next_control(ctrl->in(2));
1547         if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1548              (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1549           ctrl = next_control(in1->in(0)->in(0));
1550         } else {
1551           break;
1552         }
1553       } else {
1554         break;
1555       }
1556     } else {
1557       ctrl = next_control(ctrl->in(0));  // keep searching
1558     }
1559   }
1560   if (ctrl->is_Lock()) {
1561     LockNode *lock = ctrl->as_Lock();
1562     if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&








1563         BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
1564       lock_result = lock;
1565     }
1566   }
1567   return lock_result;
1568 }
1569 
1570 // This code corresponds to case 3 above.
1571 
// Case 3: a diamond where one path unlocks and the other path immediately
// re-locks the same object.  'node' is an If projection on lock's control
// path; if the If is preceded by a matching unlock and the opposite
// projection leads straight to a matching lock, record both candidates.
bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      // Examine the opposite projection of the same If.
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      // The other side must be a not-yet-eliminated Lock on the same object
      // and the same lock stack slot.
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  // Pattern not found: discard any unlocks speculatively collected above.
  lock_ops.trunc_to(0);
  return false;
}
1606 
1607 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1608                                GrowableArray<AbstractLockNode*> &lock_ops) {
1609   // check each control merging at this point for a matching unlock.
1610   // in(0) should be self edge so skip it.
1611   for (int i = 1; i < (int)region->req(); i++) {
1612     Node *in_node = next_control(region->in(i));
1613     if (in_node != NULL) {


1769 #endif
1770     return false; // External lock or it is not Box (Phi node).
1771   }
1772 
1773   // Ignore complex cases: merged locks or multiple locks.
1774   Node* obj = obj_node();
1775   LockNode* unique_lock = NULL;
1776   if (!box->is_simple_lock_region(&unique_lock, obj)) {
1777 #ifdef ASSERT
1778     this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
1779 #endif
1780     return false;
1781   }
1782   if (unique_lock != this) {
1783 #ifdef ASSERT
1784     this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
1785 #endif
1786     return false;
1787   }
1788 





1789   // Look for external lock for the same object.
1790   SafePointNode* sfn = this->as_SafePoint();
1791   JVMState* youngest_jvms = sfn->jvms();
1792   int max_depth = youngest_jvms->depth();
1793   for (int depth = 1; depth <= max_depth; depth++) {
1794     JVMState* jvms = youngest_jvms->of_depth(depth);
1795     int num_mon  = jvms->nof_monitors();
1796     // Loop over monitors
1797     for (int idx = 0; idx < num_mon; idx++) {
1798       Node* obj_node = sfn->monitor_obj(jvms, idx);





1799       BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1800       if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1801         return true;
1802       }
1803     }
1804   }
1805 #ifdef ASSERT
1806   this->log_lock_optimization(c, "eliminate_lock_INLR_3");
1807 #endif
1808   return false;
1809 }
1810 
1811 //=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }  // exact object size for cloning
1813 
1814 //=============================================================================
1815 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1816 
1817   // perform any generic optimizations first (returns 'this' or NULL)
1818   Node *result = SafePointNode::Ideal(phase, can_reshape);




  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "opto/callGenerator.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/escape.hpp"
  32 #include "opto/locknode.hpp"
  33 #include "opto/machnode.hpp"
  34 #include "opto/matcher.hpp"
  35 #include "opto/parse.hpp"
  36 #include "opto/regalloc.hpp"
  37 #include "opto/regmask.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"
  40 #if INCLUDE_ALL_GCS
  41 #include "gc_implementation/shenandoah/shenandoahBarrierSetC2.hpp"
  42 #endif
  43 
  44 // Portions of code courtesy of Clifford Click
  45 
  46 // Optimization - Graph Style
  47 
  48 //=============================================================================
// StartNode: entry point of the method's graph.  Its type is the fixed
// tuple type (_domain) describing the incoming parameters.
uint StartNode::size_of() const { return sizeof(*this); }
// Two StartNodes are equal iff they share the same parameter tuple type.
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif

//------------------------------Ideal------------------------------------------
// Standard dead-region cleanup.  Returns 'this' if progress was made,
// NULL otherwise (the Ideal-transform convention for "no change").
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}
  62 


 793   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 794     Node *use = p->fast_out(i);
 795     if (use->is_CheckCastPP()) {
 796       if (cast != NULL) {
 797         return this;  // more than 1 CheckCastPP
 798       }
 799       cast = use;
 800     } else if (!use->is_Initialize() &&
 801                !use->is_AddP()) {
 802       // Expected uses are restricted to a CheckCastPP, an Initialize
 803       // node, and AddP nodes. If we encounter any other use (a Phi
 804       // node can be seen in rare cases) return this to prevent
 805       // incorrect optimizations.
 806       return this;
 807     }
 808   }
 809   return cast;
 810 }
 811 
 812 
 813 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
 814   projs->fallthrough_proj      = NULL;
 815   projs->fallthrough_catchproj = NULL;
 816   projs->fallthrough_ioproj    = NULL;
 817   projs->catchall_ioproj       = NULL;
 818   projs->catchall_catchproj    = NULL;
 819   projs->fallthrough_memproj   = NULL;
 820   projs->catchall_memproj      = NULL;
 821   projs->resproj               = NULL;
 822   projs->exobj                 = NULL;
 823 
 824   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 825     ProjNode *pn = fast_out(i)->as_Proj();
 826     if (pn->outcnt() == 0) continue;
 827     switch (pn->_con) {
 828     case TypeFunc::Control:
 829       {
 830         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 831         projs->fallthrough_proj = pn;
 832         DUIterator_Fast jmax, j = pn->fast_outs(jmax);
 833         const Node *cn = pn->fast_out(j);


 856         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 857           assert(projs->exobj == NULL, "only one");
 858           projs->exobj = e;
 859         }
 860       }
 861       break;
 862     case TypeFunc::Memory:
 863       if (pn->_is_io_use)
 864         projs->catchall_memproj = pn;
 865       else
 866         projs->fallthrough_memproj = pn;
 867       break;
 868     case TypeFunc::Parms:
 869       projs->resproj = pn;
 870       break;
 871     default:
 872       assert(false, "unexpected projection from allocation node.");
 873     }
 874   }
 875 
 876   // The resproj may not exist because the result could be ignored
 877   // and the exception object may not exist if an exception handler
 878   // swallows the exception but all the other must exist and be found.
 879   assert(projs->fallthrough_proj      != NULL, "must be found");
 880   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
 881   assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
 882   assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
 883   assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
 884   assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
 885   if (separate_io_proj) {
 886     assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
 887     assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
 888   }
 889 }
 890 
 891 Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 892   CallGenerator* cg = generator();
 893   if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
 894     // Check whether this MH handle call becomes a candidate for inlining
 895     ciMethod* callee = cg->method();
 896     vmIntrinsics::ID iid = callee->intrinsic_id();
 897     if (iid == vmIntrinsics::_invokeBasic) {
 898       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
 899         phase->C->prepend_late_inline(cg);
 900         set_generator(NULL);
 901       }
 902     } else {
 903       assert(callee->has_member_arg(), "wrong type of call?");
 904       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
 905         phase->C->prepend_late_inline(cg);
 906         set_generator(NULL);
 907       }
 908     }
 909   }
 910   return SafePointNode::Ideal(phase, can_reshape);
 911 }
 912 

 913 //=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
// Java calls are equal only when the base call state matches and they target
// the same resolved method.
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
// Debug dump: short method name (if resolved) followed by base call info.
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif
 925 
 926 //=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
// Static Java calls add no identity state beyond CallJavaNode.
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}
 932 


 984 
 985 //=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
// Runtime calls are identified by their C entry point name, so compare the
// name by content (not pointer) in addition to the base call state.
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
// Debug dump: "# <entry-point-name>" followed by base call info.
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
// Runtime stubs take their arguments in the platform C calling convention.
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}
1003 
1004 bool CallRuntimeNode::is_call_to_arraycopystub() const {
1005   if (_name != NULL && strstr(_name, "arraycopy") != 0) {
1006     return true;
1007   }
1008   return false;
1009 }
1010 
1011 //=============================================================================
1012 //------------------------------calling_convention-----------------------------
1013 
1014 
1015 //=============================================================================
#ifndef PRODUCT
// Debug dump: leaf calls print like runtime calls,
// "# <entry-point-name>" followed by base call info.
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
1023 
// Under Shenandoah, a g1_wb_pre (pre-write-barrier) runtime call may carry an
// extra trailing argument beyond the stub's signature.  If that argument is
// only used by such barrier calls, drop the edge so the address computation
// behind it can die.
Node *CallLeafNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (UseShenandoahGC && is_g1_wb_pre_call()) {
    uint cnt = OptoRuntime::g1_wb_pre_Type()->domain()->cnt();
    if (req() > cnt) {
      // There is one argument past the stub signature; inspect it.
      Node* addp = in(cnt);
      if (has_only_g1_wb_pre_uses(addp)) {
        del_req(cnt);  // detach the redundant edge
        if (can_reshape) {
          // Re-queue the detached node so IGVN can remove it if now dead.
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return this;  // report progress
      }
    }
  }

  return CallNode::Ideal(phase, can_reshape);
}
1041 
// Returns true iff 'n' has at least one use and every use is a g1_wb_pre
// runtime call.
// NOTE(review): the early return makes this unconditionally false when
// UseShenandoahGC is set, yet the caller (CallLeafNode::Ideal) only invokes
// it when UseShenandoahGC is set -- that renders the caller's optimization a
// no-op.  Confirm whether the guard was meant to be !UseShenandoahGC.
bool CallLeafNode::has_only_g1_wb_pre_uses(Node* n) {
  if (UseShenandoahGC) {
    return false;
  }
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_g1_wb_pre_call()) {
      return false;  // found a non-barrier use
    }
  }
  // outcnt() > 0 rules out a completely unused node.
  return n->outcnt() > 0;
}
1054 
1055 //=============================================================================
1056 
// Store node 'c' into local slot 'idx' of the given JVM state, taking care
// not to leave a stale half of a two-slot (long/double) value behind.
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
1072 
uint SafePointNode::size_of() const { return sizeof(*this); }  // exact object size for cloning
1074 uint SafePointNode::cmp( const Node &n ) const {


1538       } else {
1539         break;
1540       }
1541     } else {
1542       break; // found an interesting control
1543     }
1544   }
1545   return ctrl;
1546 }
1547 //
1548 // Given a control, see if it's the control projection of an Unlock which
1549 // is operating on the same object as lock.
1550 //
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  // Only a Control projection can lead back to an Unlock node.
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      Node* lock_obj = lock->obj_node();
      Node* unlock_obj = unlock->obj_node();
#if INCLUDE_ALL_GCS
      if (UseShenandoahGC) {
        // Shenandoah may interpose GC barrier nodes on the object inputs;
        // step over them so both sides compare the underlying object.
        lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj);
        unlock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(unlock_obj);
      }
#endif
      // Match criteria: same object (ignoring casts), same lock stack slot,
      // and the unlock has not already been eliminated.
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);  // record it as part of the eliminable set
        return true;
      }
    }
  }
  return false;
}
1576 
1577 //
1578 // Find the lock matching an unlock.  Returns null if a safepoint
1579 // or complicated control is encountered first.
1580 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1581   LockNode *lock_result = NULL;
1582   // find the matching lock, or an intervening safepoint
1583   Node *ctrl = next_control(unlock->in(0));
1584   while (1) {
1585     assert(ctrl != NULL, "invalid control graph");
1586     assert(!ctrl->is_Start(), "missing lock for unlock");


1591     } else if (ctrl->is_Region()) {
1592       // Check for a simple diamond pattern.  Punt on anything more complicated
1593       if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1594         Node *in1 = next_control(ctrl->in(1));
1595         Node *in2 = next_control(ctrl->in(2));
1596         if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1597              (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1598           ctrl = next_control(in1->in(0)->in(0));
1599         } else {
1600           break;
1601         }
1602       } else {
1603         break;
1604       }
1605     } else {
1606       ctrl = next_control(ctrl->in(0));  // keep searching
1607     }
1608   }
1609   if (ctrl->is_Lock()) {
1610     LockNode *lock = ctrl->as_Lock();
1611     Node* lock_obj = lock->obj_node();
1612     Node* unlock_obj = unlock->obj_node();
1613 #if INCLUDE_ALL_GCS
1614     if (UseShenandoahGC) {
1615       lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj);
1616       unlock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(unlock_obj);
1617     }
1618 #endif
1619     if (lock_obj->eqv_uncast(unlock_obj) &&
1620         BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
1621       lock_result = lock;
1622     }
1623   }
1624   return lock_result;
1625 }
1626 
1627 // This code corresponds to case 3 above.
1628 
// Case 3: a diamond where one path unlocks and the other path immediately
// re-locks the same object.  'node' is an If projection on lock's control
// path; if the If is preceded by a matching unlock and the opposite
// projection leads straight to a matching lock, record both candidates.
bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      // Examine the opposite projection of the same If.
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      // The other side must be a not-yet-eliminated Lock on the same object
      // and the same lock stack slot.
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        Node* lock_obj = lock->obj_node();
        Node* lock1_obj = lock1->obj_node();
#if INCLUDE_ALL_GCS
        if (UseShenandoahGC) {
          // Step over any Shenandoah GC barriers so both locks compare the
          // underlying object.
          lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj);
          lock1_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock1_obj);
        }
#endif
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  // Pattern not found: discard any unlocks speculatively collected above.
  lock_ops.trunc_to(0);
  return false;
}
1671 
1672 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1673                                GrowableArray<AbstractLockNode*> &lock_ops) {
1674   // check each control merging at this point for a matching unlock.
1675   // in(0) should be self edge so skip it.
1676   for (int i = 1; i < (int)region->req(); i++) {
1677     Node *in_node = next_control(region->in(i));
1678     if (in_node != NULL) {


1834 #endif
1835     return false; // External lock or it is not Box (Phi node).
1836   }
1837 
1838   // Ignore complex cases: merged locks or multiple locks.
1839   Node* obj = obj_node();
1840   LockNode* unique_lock = NULL;
1841   if (!box->is_simple_lock_region(&unique_lock, obj)) {
1842 #ifdef ASSERT
1843     this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
1844 #endif
1845     return false;
1846   }
1847   if (unique_lock != this) {
1848 #ifdef ASSERT
1849     this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
1850 #endif
1851     return false;
1852   }
1853 
1854 #if INCLUDE_ALL_GCS
1855   if (UseShenandoahGC) {
1856     obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(obj);
1857   }
1858 #endif
1859   // Look for external lock for the same object.
1860   SafePointNode* sfn = this->as_SafePoint();
1861   JVMState* youngest_jvms = sfn->jvms();
1862   int max_depth = youngest_jvms->depth();
1863   for (int depth = 1; depth <= max_depth; depth++) {
1864     JVMState* jvms = youngest_jvms->of_depth(depth);
1865     int num_mon  = jvms->nof_monitors();
1866     // Loop over monitors
1867     for (int idx = 0; idx < num_mon; idx++) {
1868       Node* obj_node = sfn->monitor_obj(jvms, idx);
1869 #if INCLUDE_ALL_GCS
1870       if (UseShenandoahGC) {
1871         obj_node = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(obj_node);
1872       }
1873 #endif
1874       BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1875       if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1876         return true;
1877       }
1878     }
1879   }
1880 #ifdef ASSERT
1881   this->log_lock_optimization(c, "eliminate_lock_INLR_3");
1882 #endif
1883   return false;
1884 }
1885 
1886 //=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }  // exact object size for cloning
1888 
1889 //=============================================================================
1890 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1891 
1892   // perform any generic optimizations first (returns 'this' or NULL)
1893   Node *result = SafePointNode::Ideal(phase, can_reshape);


< prev index next >