src/share/vm/opto/matcher.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "opto/addnode.hpp"
  28 #include "opto/callnode.hpp"
  29 #include "opto/connode.hpp"
  30 #include "opto/idealGraphPrinter.hpp"
  31 #include "opto/matcher.hpp"
  32 #include "opto/memnode.hpp"
  33 #include "opto/opcodes.hpp"
  34 #include "opto/regmask.hpp"
  35 #include "opto/rootnode.hpp"
  36 #include "opto/runtime.hpp"
  37 #include "opto/type.hpp"
  38 #include "opto/vectornode.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/os.hpp"
  41 #if defined AD_MD_HPP
  42 # include AD_MD_HPP
  43 #elif defined TARGET_ARCH_MODEL_x86_32
  44 # include "adfiles/ad_x86_32.hpp"
  45 #elif defined TARGET_ARCH_MODEL_x86_64
  46 # include "adfiles/ad_x86_64.hpp"
  47 #elif defined TARGET_ARCH_MODEL_aarch64
  48 # include "adfiles/ad_aarch64.hpp"
  49 #elif defined TARGET_ARCH_MODEL_sparc
  50 # include "adfiles/ad_sparc.hpp"
  51 #elif defined TARGET_ARCH_MODEL_zero
  52 # include "adfiles/ad_zero.hpp"
  53 #elif defined TARGET_ARCH_MODEL_ppc_64
  54 # include "adfiles/ad_ppc_64.hpp"
  55 #endif
  56 


1004     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1005     if (C->failing()) return NULL;
1006     n = mstack.node();          // Leave node on stack
1007     Node_State nstate = mstack.state();
1008     if (nstate == Visit) {
1009       mstack.set_state(Post_Visit);
1010       Node *oldn = n;
1011       // Old-space or new-space check
1012       if (!C->node_arena()->contains(n)) {
1013         // Old space!
1014         Node* m;
1015         if (has_new_node(n)) {  // Not yet Label/Reduced
1016           m = new_node(n);
1017         } else {
1018           if (!is_dontcare(n)) { // Matcher can match this guy
  1019             // Calls match specially.  They match alone with no children.
1020             // Their children, the incoming arguments, match normally.
1021             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1022             if (C->failing())  return NULL;
1023             if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
1024           } else {                  // Nothing the matcher cares about
1025             if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
1026               // Convert to machine-dependent projection
1027               m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
1028 #ifdef ASSERT
1029               _new2old_map.map(m->_idx, n);
1030 #endif
1031               if (m->in(0) != NULL) // m might be top
1032                 collect_null_checks(m, n);
  1033             } else {                // Else just a regular ol' guy
1034               m = n->clone();       // So just clone into new-space
1035 #ifdef ASSERT
1036               _new2old_map.map(m->_idx, n);
1037 #endif
1038               // Def-Use edges will be added incrementally as Uses
1039               // of this node are matched.
1040               assert(m->outcnt() == 0, "no Uses of this clone yet");
1041             }
1042           }
1043 


1048             C->set_node_notes_at(m->_idx, nn);
1049           }
1050           debug_only(match_alias_type(C, n, m));
1051         }
1052         n = m;    // n is now a new-space node
1053         mstack.set_node(n);
1054       }
1055 
1056       // New space!
1057       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1058 
1059       int i;
1060       // Put precedence edges on stack first (match them last).
1061       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1062         Node *m = oldn->in(i);
1063         if (m == NULL) break;
1064         // set -1 to call add_prec() instead of set_req() during Step1
1065         mstack.push(m, Visit, n, -1);
1066       }
1067 
1068       // For constant debug info, I'd rather have unmatched constants.
1069       int cnt = n->req();
1070       JVMState* jvms = n->jvms();
1071       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1072 
1073       // Now do only debug info.  Clone constants rather than matching.
1074       // Constants are represented directly in the debug info without
1075       // the need for executable machine instructions.
1076       // Monitor boxes are also represented directly.
1077       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1078         Node *m = n->in(i);          // Get input
1079         int op = m->Opcode();
1080         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1081         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1082             op == Op_ConF || op == Op_ConD || op == Op_ConL
1083             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1084             ) {
1085           m = m->clone();
1086 #ifdef ASSERT
1087           _new2old_map.map(m->_idx, n);


1738 
1739   // PhaseChaitin::fixup_spills will sometimes generate spill code
  1740   // via the matcher.  By that time, nodes have been wired into the CFG,
1741   // and any further nodes generated by expand rules will be left hanging
1742   // in space, and will not get emitted as output code.  Catch this.
1743   // Also, catch any new register allocation constraints ("projections")
1744   // generated belatedly during spill code generation.
1745   if (_allocation_started) {
1746     guarantee(ex == mach, "no expand rules during spill generation");
1747     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1748   }
1749 
1750   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1751     // Record the con for sharing
1752     _shared_nodes.map(leaf->_idx, ex);
1753   }
1754 
1755   return ex;
1756 }
1757 
1758 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1759   // 'op' is what I am expecting to receive
1760   int op = _leftOp[rule];
  1761   // Operand type to catch the child's result
1762   // This is what my child will give me.
1763   int opnd_class_instance = s->_rule[op];
1764   // Choose between operand class or not.
1765   // This is what I will receive.
1766   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1767   // New rule for child.  Chase operand classes to get the actual rule.
1768   int newrule = s->_rule[catch_op];
1769 
1770   if( newrule < NUM_OPERANDS ) {
1771     // Chain from operand or operand class, may be output of shared node
1772     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1773             "Bad AD file: Instruction chain rule must chain from operand");
1774     // Insert operand into array of operands for this instruction
1775     mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1776 
1777     ReduceOper( s, newrule, mem, mach );
1778   } else {
1779     // Chain from the result of an instruction
1780     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1781     mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1782     Node *mem1 = (Node*)1;
1783     debug_only(Node *save_mem_node = _mem_node;)
1784     mach->add_req( ReduceInst(s, newrule, mem1) );
1785     debug_only(_mem_node = save_mem_node;)
1786   }
1787   return;
1788 }
1789 
1790 
1791 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1792   if( s->_leaf->is_Load() ) {
1793     Node *mem2 = s->_leaf->in(MemNode::Memory);
1794     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1795     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1796     mem = mem2;
1797   }
1798   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1799     if( mach->in(0) == NULL )
1800       mach->set_req(0, s->_leaf->in(0));
1801   }
1802 
1803   // Now recursively walk the state tree & add operand list.
1804   for( uint i=0; i<2; i++ ) {   // binary tree
1805     State *newstate = s->_kids[i];
1806     if( newstate == NULL ) break;      // Might only have 1 child
1807     // 'op' is what I am expecting to receive
1808     int op;
1809     if( i == 0 ) {
1810       op = _leftOp[rule];
1811     } else {


1854 //     Skip over it ( do nothing )
1855 // (3) Child is an instruction -
1856 //     Call ReduceInst recursively and
  1857 //     add the instruction as an input to the MachNode
1858 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1859   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1860   State *kid = s->_kids[0];
1861   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1862 
1863   // Leaf?  And not subsumed?
1864   if( kid == NULL && !_swallowed[rule] ) {
1865     mach->add_req( s->_leaf );  // Add leaf pointer
1866     return;                     // Bail out
1867   }
1868 
1869   if( s->_leaf->is_Load() ) {
1870     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1871     mem = s->_leaf->in(MemNode::Memory);
1872     debug_only(_mem_node = s->_leaf;)
1873   }
1874   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1875     if( !mach->in(0) )
1876       mach->set_req(0,s->_leaf->in(0));
1877     else {
1878       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1879     }
1880   }
1881 
1882   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1883     int newrule;
1884     if( i == 0)
1885       newrule = kid->_rule[_leftOp[rule]];
1886     else
1887       newrule = kid->_rule[_rightOp[rule]];
1888 
1889     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1890       // Internal operand; recurse but do nothing else
1891       ReduceOper( kid, newrule, mem, mach );
1892 
1893     } else {                    // Child is a new instruction


2113         const TypePtr* tp = tn->type()->is_ptr();
2114         if (tp->_ptr == TypePtr::AnyNull) {
2115           tn->set_type(TypePtr::NULL_PTR);
2116         }
2117         break;
2118       }
  2119       case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2120         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2121         const TypePtr* tp = tn->type()->make_ptr();
2122         if (tp && tp->_ptr == TypePtr::AnyNull) {
2123           tn->set_type(TypeNarrowOop::NULL_PTR);
2124         }
2125         break;
2126       }
2127       case Op_Binary:         // These are introduced in the Post_Visit state.
2128         ShouldNotReachHere();
2129         break;
2130       case Op_ClearArray:
2131       case Op_SafePoint:
2132         mem_op = true;
2133         break;
2134       default:
2135         if( n->is_Store() ) {
2136           // Do match stores, despite no ideal reg
2137           mem_op = true;
2138           break;
2139         }
2140         if( n->is_Mem() ) { // Loads and LoadStores
2141           mem_op = true;
2142           // Loads must be root of match tree due to prior load conflict
2143           if( C->subsume_loads() == false )
2144             set_shared(n);
2145         }
2146         // Fall into default case
2147         if( !n->ideal_reg() )
2148           set_dontcare(n);  // Unmatchable Nodes
2149       } // end_switch
2150 
2151       for(int i = n->req() - 1; i >= 0; --i) { // For my children
2152         Node *m = n->in(i); // Get ith input




  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "opto/addnode.hpp"
  28 #include "opto/callnode.hpp"
  29 #include "opto/connode.hpp"
  30 #include "opto/idealGraphPrinter.hpp"
  31 #include "opto/matcher.hpp"
  32 #include "opto/memnode.hpp"
  33 #include "opto/opcodes.hpp"
  34 #include "opto/regmask.hpp"
  35 #include "opto/rootnode.hpp"
  36 #include "opto/runtime.hpp"
  37 #include "opto/shenandoahSupport.hpp"
  38 #include "opto/type.hpp"
  39 #include "opto/vectornode.hpp"
  40 #include "runtime/atomic.hpp"
  41 #include "runtime/os.hpp"
  42 #if defined AD_MD_HPP
  43 # include AD_MD_HPP
  44 #elif defined TARGET_ARCH_MODEL_x86_32
  45 # include "adfiles/ad_x86_32.hpp"
  46 #elif defined TARGET_ARCH_MODEL_x86_64
  47 # include "adfiles/ad_x86_64.hpp"
  48 #elif defined TARGET_ARCH_MODEL_aarch64
  49 # include "adfiles/ad_aarch64.hpp"
  50 #elif defined TARGET_ARCH_MODEL_sparc
  51 # include "adfiles/ad_sparc.hpp"
  52 #elif defined TARGET_ARCH_MODEL_zero
  53 # include "adfiles/ad_zero.hpp"
  54 #elif defined TARGET_ARCH_MODEL_ppc_64
  55 # include "adfiles/ad_ppc_64.hpp"
  56 #endif
  57 


1005     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1006     if (C->failing()) return NULL;
1007     n = mstack.node();          // Leave node on stack
1008     Node_State nstate = mstack.state();
1009     if (nstate == Visit) {
1010       mstack.set_state(Post_Visit);
1011       Node *oldn = n;
1012       // Old-space or new-space check
1013       if (!C->node_arena()->contains(n)) {
1014         // Old space!
1015         Node* m;
1016         if (has_new_node(n)) {  // Not yet Label/Reduced
1017           m = new_node(n);
1018         } else {
1019           if (!is_dontcare(n)) { // Matcher can match this guy
  1020             // Calls match specially.  They match alone with no children.
1021             // Their children, the incoming arguments, match normally.
1022             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1023             if (C->failing())  return NULL;
1024             if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
1025             if (n->is_MemBar() && UseShenandoahGC) {
1026               m->as_MachMemBar()->set_adr_type(n->adr_type());
1027             }
1028           } else {                  // Nothing the matcher cares about
1029             if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
1030               // Convert to machine-dependent projection
1031               m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
1032 #ifdef ASSERT
1033               _new2old_map.map(m->_idx, n);
1034 #endif
1035               if (m->in(0) != NULL) // m might be top
1036                 collect_null_checks(m, n);
  1037             } else {                // Else just a regular ol' guy
1038               m = n->clone();       // So just clone into new-space
1039 #ifdef ASSERT
1040               _new2old_map.map(m->_idx, n);
1041 #endif
1042               // Def-Use edges will be added incrementally as Uses
1043               // of this node are matched.
1044               assert(m->outcnt() == 0, "no Uses of this clone yet");
1045             }
1046           }
1047 


1052             C->set_node_notes_at(m->_idx, nn);
1053           }
1054           debug_only(match_alias_type(C, n, m));
1055         }
1056         n = m;    // n is now a new-space node
1057         mstack.set_node(n);
1058       }
1059 
1060       // New space!
1061       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1062 
1063       int i;
1064       // Put precedence edges on stack first (match them last).
1065       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1066         Node *m = oldn->in(i);
1067         if (m == NULL) break;
1068         // set -1 to call add_prec() instead of set_req() during Step1
1069         mstack.push(m, Visit, n, -1);
1070       }
1071 
1072       // Handle precedence edges for interior nodes
1073       for (i = n->len()-1; (uint)i >= n->req(); i--) {
1074         Node *m = n->in(i);
1075         if (m == NULL || C->node_arena()->contains(m)) continue;
1076         n->rm_prec(i);
1077         // set -1 to call add_prec() instead of set_req() during Step1
1078         mstack.push(m, Visit, n, -1);
1079       }
1080 
1081       // For constant debug info, I'd rather have unmatched constants.
1082       int cnt = n->req();
1083       JVMState* jvms = n->jvms();
1084       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1085 
1086       // Now do only debug info.  Clone constants rather than matching.
1087       // Constants are represented directly in the debug info without
1088       // the need for executable machine instructions.
1089       // Monitor boxes are also represented directly.
1090       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1091         Node *m = n->in(i);          // Get input
1092         int op = m->Opcode();
1093         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1094         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1095             op == Op_ConF || op == Op_ConD || op == Op_ConL
1096             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1097             ) {
1098           m = m->clone();
1099 #ifdef ASSERT
1100           _new2old_map.map(m->_idx, n);

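The hunk above is the core of Matcher::xform's matching pass: a non-recursive pre/post-order walk driven by an explicit stack whose entries carry a Visit or Post_Visit state, with a test-and-set visited bitmap (_visited.test_set(n->_idx)) cutting off re-entry into shared nodes. Below is a minimal standalone model of just that control structure; Node, walk, and the printf placeholders are invented stand-ins for HotSpot's Node_Stack, VectorSet, and the actual match/rewire work, not the real API.

#include <cstdio>
#include <utility>
#include <vector>

// Toy node: an id plus input edges (cf. Node::in(i)).
struct Node {
  int id;
  std::vector<Node*> in;
};

enum State { Visit, Post_Visit };

static void walk(Node* root, int max_id) {
  std::vector<bool> visited(max_id + 1, false);        // cf. VectorSet _visited
  std::vector<std::pair<Node*, State> > stack;         // cf. Node_Stack mstack
  stack.push_back(std::make_pair(root, Visit));
  while (!stack.empty()) {
    Node* n = stack.back().first;
    State s = stack.back().second;
    if (s == Visit) {
      stack.back().second = Post_Visit;                // leave node on stack
      if (visited[n->id]) { stack.pop_back(); continue; }
      visited[n->id] = true;
      std::printf("pre  %d\n", n->id);                 // match/clone happens here
      for (size_t i = 0; i < n->in.size(); i++)        // children matched later
        if (n->in[i] != NULL) stack.push_back(std::make_pair(n->in[i], Visit));
    } else {                                           // Post_Visit: kids are done
      std::printf("post %d\n", n->id);                 // edges rewired here
      stack.pop_back();
    }
  }
}

int main() {
  Node a = {0, {}}, b = {1, {&a}}, c = {2, {&a, &b}};  // 'a' is shared by b and c
  walk(&c, 2);                                         // 'a' is pre-visited once
  return 0;
}

Keeping the traversal state in the worklist entry rather than in activation records is what lets the matcher survive arbitrarily deep ideal graphs without overflowing the C stack.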

1751 
1752   // PhaseChaitin::fixup_spills will sometimes generate spill code
  1753   // via the matcher.  By that time, nodes have been wired into the CFG,
1754   // and any further nodes generated by expand rules will be left hanging
1755   // in space, and will not get emitted as output code.  Catch this.
1756   // Also, catch any new register allocation constraints ("projections")
1757   // generated belatedly during spill code generation.
1758   if (_allocation_started) {
1759     guarantee(ex == mach, "no expand rules during spill generation");
1760     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1761   }
1762 
1763   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1764     // Record the con for sharing
1765     _shared_nodes.map(leaf->_idx, ex);
1766   }
1767 
1768   return ex;
1769 }
1770 
1771 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1772   for (uint i = n->req(); i < n->len(); i++) {
1773     if (n->in(i) != NULL) {
1774       mach->add_prec(n->in(i));
1775     }
1776   }
1777 }
1778 
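The new handle_precedence_edges helper leans on HotSpot's edge layout: input slots below req() are required data/control inputs, while slots from req() up to len() hold precedence edges that only constrain scheduling, so the precedence slice can be replayed onto the fresh MachNode without knowing anything about the node's semantics. A standalone model of that req()/len() split follows; TNode and copy_precedence are made-up names, and the real Node edge array is managed quite differently, this only preserves the invariant:

#include <cassert>
#include <cstddef>
#include <vector>

// Toy node with the same edge layout: slots [0, req) are required
// inputs, slots [req, len) are precedence-only (scheduling) edges.
struct TNode {
  std::vector<TNode*> edges;
  size_t required;

  TNode() : required(0) {}
  size_t req() const { return required; }
  size_t len() const { return edges.size(); }
  TNode* in(size_t i) const { return edges[i]; }

  void add_req(TNode* n)  { edges.insert(edges.begin() + required, n); required++; }
  void add_prec(TNode* n) { edges.push_back(n); }   // always lands past req()
};

// Mirror of the helper above: copy only the precedence slice of 'n'
// onto the freshly matched machine node 'mach'.
static void copy_precedence(const TNode* n, TNode* mach) {
  for (size_t i = n->req(); i < n->len(); i++)
    if (n->in(i) != NULL)
      mach->add_prec(n->in(i));
}

int main() {
  TNode ctrl, data, antidep, ideal, mach;
  ideal.add_req(&ctrl);
  ideal.add_req(&data);
  ideal.add_prec(&antidep);      // ordering constraint only, no dataflow

  copy_precedence(&ideal, &mach);
  assert(mach.len() == 1 && mach.in(0) == &antidep);
  assert(mach.req() == 0);       // no required inputs were copied
  return 0;
}

Because the invariant holds for every node, the helper can be called from ReduceInst_Interior and ReduceOper (as the patched versions below do) without any per-opcode cases.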
1779 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1780   // 'op' is what I am expecting to receive
1781   int op = _leftOp[rule];
  1782   // Operand type to catch the child's result
1783   // This is what my child will give me.
1784   int opnd_class_instance = s->_rule[op];
1785   // Choose between operand class or not.
1786   // This is what I will receive.
1787   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1788   // New rule for child.  Chase operand classes to get the actual rule.
1789   int newrule = s->_rule[catch_op];
1790 
1791   if( newrule < NUM_OPERANDS ) {
1792     // Chain from operand or operand class, may be output of shared node
1793     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1794             "Bad AD file: Instruction chain rule must chain from operand");
1795     // Insert operand into array of operands for this instruction
1796     mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1797 
1798     ReduceOper( s, newrule, mem, mach );
1799   } else {
1800     // Chain from the result of an instruction
1801     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1802     mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1803     Node *mem1 = (Node*)1;
1804     debug_only(Node *save_mem_node = _mem_node;)
1805     mach->add_req( ReduceInst(s, newrule, mem1) );
1806     debug_only(_mem_node = save_mem_node;)
1807   }
1808   return;
1809 }
1810 
1811 
1812 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1813   handle_precedence_edges(s->_leaf, mach);
1814 
1815   if( s->_leaf->is_Load() ) {
1816     Node *mem2 = s->_leaf->in(MemNode::Memory);
1817     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1818     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1819     mem = mem2;
1820   }
1821   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1822     if( mach->in(0) == NULL )
1823       mach->set_req(0, s->_leaf->in(0));
1824   }
1825 
1826   // Now recursively walk the state tree & add operand list.
1827   for( uint i=0; i<2; i++ ) {   // binary tree
1828     State *newstate = s->_kids[i];
1829     if( newstate == NULL ) break;      // Might only have 1 child
1830     // 'op' is what I am expecting to receive
1831     int op;
1832     if( i == 0 ) {
1833       op = _leftOp[rule];
1834     } else {


1877 //     Skip over it ( do nothing )
1878 // (3) Child is an instruction -
1879 //     Call ReduceInst recursively and
  1880 //     add the instruction as an input to the MachNode
1881 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1882   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1883   State *kid = s->_kids[0];
1884   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1885 
1886   // Leaf?  And not subsumed?
1887   if( kid == NULL && !_swallowed[rule] ) {
1888     mach->add_req( s->_leaf );  // Add leaf pointer
1889     return;                     // Bail out
1890   }
1891 
1892   if( s->_leaf->is_Load() ) {
1893     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1894     mem = s->_leaf->in(MemNode::Memory);
1895     debug_only(_mem_node = s->_leaf;)
1896   }
1897 
1898   handle_precedence_edges(s->_leaf, mach);
1899 
1900   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1901     if( !mach->in(0) )
1902       mach->set_req(0,s->_leaf->in(0));
1903     else {
1904       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1905     }
1906   }
1907 
1908   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1909     int newrule;
1910     if( i == 0)
1911       newrule = kid->_rule[_leftOp[rule]];
1912     else
1913       newrule = kid->_rule[_rightOp[rule]];
1914 
1915     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1916       // Internal operand; recurse but do nothing else
1917       ReduceOper( kid, newrule, mem, mach );
1918 
1919     } else {                    // Child is a new instruction

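ReduceInst_Chain_Rule, ReduceInst_Interior, and ReduceOper together walk the binary State tree that labeling produced, and the rule number found at each kid picks the output shape: rules below _LAST_MACH_OPER denote internal operands, which are folded into the enclosing instruction (recurse only), while larger rules denote instructions, which are reduced on their own and wired in as required inputs. The sketch below is a simplified stand-in for that dispatch; the threshold value, node names, and printf output in place of MachNode construction are all invented:

#include <cstdio>

// Rules below this (made-up) threshold are internal operands, subsumed
// into the enclosing instruction; rules at or above it are instructions.
const int LAST_MACH_OPER = 100;

struct State {
  const char* leaf;    // ideal node this state labels
  int rule;            // rule selected for this subtree
  State* kids[2];      // binary match tree
};

static void reduce_inst(State* s, int depth);

// cf. ReduceOper: operands emit no instruction of their own; they only
// pull leaves and subtrees into the current instruction.
static void reduce_oper(State* s, int depth) {
  if (s->kids[0] == NULL) {
    std::printf("%*sfold leaf %s\n", depth * 2, "", s->leaf);
    return;
  }
  for (int i = 0; i < 2 && s->kids[i] != NULL; i++) {
    State* kid = s->kids[i];
    if (kid->rule < LAST_MACH_OPER)
      reduce_oper(kid, depth + 1);   // internal operand: just recurse
    else
      reduce_inst(kid, depth + 1);   // child instruction: emit separately
  }
}

// cf. ReduceInst: emit this instruction, then reduce its operand tree.
static void reduce_inst(State* s, int depth) {
  std::printf("%*semit inst for %s (rule %d)\n", depth * 2, "", s->leaf, s->rule);
  reduce_oper(s, depth);
}

int main() {
  // (AddI (LoadI addr) con): the load stays its own instruction, the
  // constant is swallowed as an internal operand.
  State con  = {"con",   5,   {NULL,  NULL}};
  State addr = {"addr",  7,   {NULL,  NULL}};
  State load = {"LoadI", 120, {&addr, NULL}};
  State add  = {"AddI",  150, {&load, &con}};
  reduce_inst(&add, 0);
  return 0;
}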

2139         const TypePtr* tp = tn->type()->is_ptr();
2140         if (tp->_ptr == TypePtr::AnyNull) {
2141           tn->set_type(TypePtr::NULL_PTR);
2142         }
2143         break;
2144       }
  2145       case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2146         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2147         const TypePtr* tp = tn->type()->make_ptr();
2148         if (tp && tp->_ptr == TypePtr::AnyNull) {
2149           tn->set_type(TypeNarrowOop::NULL_PTR);
2150         }
2151         break;
2152       }
2153       case Op_Binary:         // These are introduced in the Post_Visit state.
2154         ShouldNotReachHere();
2155         break;
2156       case Op_ClearArray:
2157       case Op_SafePoint:
2158         mem_op = true;
2159         break;
2160       case Op_ShenandoahReadBarrier:
2161         if (n->in(ShenandoahBarrierNode::ValueIn)->is_DecodeNarrowPtr()) {
2162           set_shared(n->in(ShenandoahBarrierNode::ValueIn)->in(1));
2163         }
2164         mem_op = true;
2165         set_shared(n);
2166         break;
2167       default:
2168         if( n->is_Store() ) {
2169           // Do match stores, despite no ideal reg
2170           mem_op = true;
2171           break;
2172         }
2173         if( n->is_Mem() ) { // Loads and LoadStores
2174           mem_op = true;
2175           // Loads must be root of match tree due to prior load conflict
2176           if( C->subsume_loads() == false )
2177             set_shared(n);
2178         }
2179         // Fall into default case
2180         if( !n->ideal_reg() )
2181           set_dontcare(n);  // Unmatchable Nodes
2182       } // end_switch
2183 
2184       for(int i = n->req() - 1; i >= 0; --i) { // For my children
2185         Node *m = n->in(i); // Get ith input

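The switch above is the core of Matcher::find_shared: it decides which ideal nodes must stay roots of their own match trees, and that is how the added Op_ShenandoahReadBarrier case keeps the barrier (and a DecodeNarrowPtr feeding its ValueIn) from being swallowed into a neighbor's tile. The toy below models only the simplest sharing criterion, multiple uses; the names and the use-count heuristic are illustrative assumptions, since the real decision is opcode-driven as the switch shows.

#include <cstdio>
#include <vector>

struct DNode {
  int id;
  std::vector<DNode*> in;
  int uses;
  bool shared;
  DNode(int i) : id(i), uses(0), shared(false) {}
};

// Pass 1: count uses; a node consumed by more than one parent must be
// the root of its own match tree (cf. set_shared).
static void mark_shared(DNode* n) {
  for (size_t i = 0; i < n->in.size(); i++) {
    DNode* m = n->in[i];
    if (++m->uses > 1) m->shared = true;
    if (m->uses == 1) mark_shared(m);    // descend on first visit only
  }
}

// Pass 2: print one "tile" per match tree, stopping at shared inputs,
// the way match_tree refuses to cross into a shared node.
static void tile(DNode* n) {
  std::printf(" %d", n->id);
  for (size_t i = 0; i < n->in.size(); i++)
    if (!n->in[i]->shared) tile(n->in[i]);
}

int main() {
  DNode a(0), b(1), c(2), root(3);
  b.in.push_back(&a);                    // b uses a
  c.in.push_back(&a);                    // c also uses a -> a becomes shared
  root.in.push_back(&b);
  root.in.push_back(&c);

  mark_shared(&root);
  std::printf("tile rooted at %d:", root.id); tile(&root); std::printf("\n");
  if (a.shared) { std::printf("tile rooted at %d:", a.id); tile(&a); std::printf("\n"); }
  return 0;
}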
