< prev index next >

src/share/vm/opto/matcher.cpp

Print this page




  27 #include "opto/addnode.hpp"
  28 #include "opto/callnode.hpp"
  29 #include "opto/connode.hpp"
  30 #include "opto/idealGraphPrinter.hpp"
  31 #include "opto/matcher.hpp"
  32 #include "opto/memnode.hpp"
  33 #include "opto/opcodes.hpp"
  34 #include "opto/regmask.hpp"
  35 #include "opto/rootnode.hpp"
  36 #include "opto/runtime.hpp"
  37 #include "opto/type.hpp"
  38 #include "opto/vectornode.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/os.hpp"
  41 #if defined AD_MD_HPP
  42 # include AD_MD_HPP
  43 #elif defined TARGET_ARCH_MODEL_x86_32
  44 # include "adfiles/ad_x86_32.hpp"
  45 #elif defined TARGET_ARCH_MODEL_x86_64
  46 # include "adfiles/ad_x86_64.hpp"


  47 #elif defined TARGET_ARCH_MODEL_sparc
  48 # include "adfiles/ad_sparc.hpp"
  49 #elif defined TARGET_ARCH_MODEL_zero
  50 # include "adfiles/ad_zero.hpp"
  51 #elif defined TARGET_ARCH_MODEL_ppc_64
  52 # include "adfiles/ad_ppc_64.hpp"
  53 #endif



  54 
  55 OptoReg::Name OptoReg::c_frame_pointer;
  56 
     // Static storage for the Matcher's shared register-mask tables.
     // idealreg2regmask maps ideal register classes to allowed machine
     // register masks; mreg2regmask gives the single-register mask for
     // each machine register.
  57 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
  58 RegMask Matcher::mreg2regmask[_last_Mach_Reg];
  59 RegMask Matcher::STACK_ONLY_mask;
  60 RegMask Matcher::c_frame_ptr_mask;
     // Opcode range [_begin_rematerialize, _end_rematerialize) comes from
     // the AD-generated constants and bounds the rematerializable rules.
  61 const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
  62 const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;
  63 
  64 //---------------------------Matcher-------------------------------------------
  65 Matcher::Matcher()
  66 : PhaseTransform( Phase::Ins_Select ),
  67 #ifdef ASSERT
  68   _old2new_map(C->comp_arena()),
  69   _new2old_map(C->comp_arena()),
  70 #endif
  71   _shared_nodes(C->comp_arena()),
  72   _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  73   _swallowed(swallowed),


1005     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1006     if (C->failing()) return NULL;
1007     n = mstack.node();          // Leave node on stack
1008     Node_State nstate = mstack.state();
1009     if (nstate == Visit) {
1010       mstack.set_state(Post_Visit);
1011       Node *oldn = n;
1012       // Old-space or new-space check
1013       if (!C->node_arena()->contains(n)) {
1014         // Old space!
1015         Node* m;
1016         if (has_new_node(n)) {  // Not yet Label/Reduced
1017           m = new_node(n);
1018         } else {
1019           if (!is_dontcare(n)) { // Matcher can match this guy
1020             // Calls match special.  They match alone with no children.
1021             // Their children, the incoming arguments, match normally.
1022             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1023             if (C->failing())  return NULL;
1024             if (m == NULL) { Matcher::soft_match_failure(); return NULL; }



1025           } else {                  // Nothing the matcher cares about
1026             if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
1027               // Convert to machine-dependent projection
1028               m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
1029 #ifdef ASSERT
1030               _new2old_map.map(m->_idx, n);
1031 #endif
1032               if (m->in(0) != NULL) // m might be top
1033                 collect_null_checks(m, n);
1034             } else {                // Else just a regular 'ol guy
1035               m = n->clone();       // So just clone into new-space
1036 #ifdef ASSERT
1037               _new2old_map.map(m->_idx, n);
1038 #endif
1039               // Def-Use edges will be added incrementally as Uses
1040               // of this node are matched.
1041               assert(m->outcnt() == 0, "no Uses of this clone yet");
1042             }
1043           }
1044 


1049             C->set_node_notes_at(m->_idx, nn);
1050           }
1051           debug_only(match_alias_type(C, n, m));
1052         }
1053         n = m;    // n is now a new-space node
1054         mstack.set_node(n);
1055       }
1056 
1057       // New space!
1058       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1059 
1060       int i;
1061       // Put precedence edges on stack first (match them last).
1062       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1063         Node *m = oldn->in(i);
1064         if (m == NULL) break;
1065         // set -1 to call add_prec() instead of set_req() during Step1
1066         mstack.push(m, Visit, n, -1);
1067       }
1068 









1069       // For constant debug info, I'd rather have unmatched constants.
1070       int cnt = n->req();
1071       JVMState* jvms = n->jvms();
1072       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1073 
1074       // Now do only debug info.  Clone constants rather than matching.
1075       // Constants are represented directly in the debug info without
1076       // the need for executable machine instructions.
1077       // Monitor boxes are also represented directly.
1078       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1079         Node *m = n->in(i);          // Get input
1080         int op = m->Opcode();
1081         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1082         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1083             op == Op_ConF || op == Op_ConD || op == Op_ConL
1084             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1085             ) {
1086           m = m->clone();
1087 #ifdef ASSERT
1088           _new2old_map.map(m->_idx, n);


1739 
1740   // PhaseChaitin::fixup_spills will sometimes generate spill code
1741   // via the matcher.  By the time, nodes have been wired into the CFG,
1742   // and any further nodes generated by expand rules will be left hanging
1743   // in space, and will not get emitted as output code.  Catch this.
1744   // Also, catch any new register allocation constraints ("projections")
1745   // generated belatedly during spill code generation.
1746   if (_allocation_started) {
1747     guarantee(ex == mach, "no expand rules during spill generation");
1748     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1749   }
1750 
1751   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1752     // Record the con for sharing
1753     _shared_nodes.map(leaf->_idx, ex);
1754   }
1755 
1756   return ex;
1757 }
1758 








1759 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
       // Reduce a chain rule: 'mach' is produced from a single child
       // (an operand/operand class or another instruction) instead of a
       // full tree.  's' is the match state for the subtree, 'rule' the
       // selected chain rule, 'mem' the memory input threaded through the
       // reduction ((Node*)1 acts as the "none seen yet" sentinel), and
       // 'mach' the MachNode being assembled.
1760   // 'op' is what I am expecting to receive
1761   int op = _leftOp[rule];
1762   // Operand type to catch childs result
1763   // This is what my child will give me.
1764   int opnd_class_instance = s->_rule[op];
1765   // Choose between operand class or not.
1766   // This is what I will receive.
1767   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1768   // New rule for child.  Chase operand classes to get the actual rule.
1769   int newrule = s->_rule[catch_op];
1770 
1771   if( newrule < NUM_OPERANDS ) {
1772     // Chain from operand or operand class, may be output of shared node
1773     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1774             "Bad AD file: Instruction chain rule must chain from operand");
1775     // Insert operand into array of operands for this instruction
1776     mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1777 
1778     ReduceOper( s, newrule, mem, mach );
1779   } else {
1780     // Chain from the result of an instruction
1781     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1782     mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
       // Fresh memory sentinel: the chained instruction is reduced with its
       // own memory context, not the caller's 'mem'.
1783     Node *mem1 = (Node*)1;
       // Save/restore _mem_node (debug bookkeeping) around the nested
       // ReduceInst so the recursion does not clobber our memory node.
1784     debug_only(Node *save_mem_node = _mem_node;)
1785     mach->add_req( ReduceInst(s, newrule, mem1) );
1786     debug_only(_mem_node = save_mem_node;)
1787   }
1788   return;
1789 }
1790 
1791 
1792 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {


1793   if( s->_leaf->is_Load() ) {
1794     Node *mem2 = s->_leaf->in(MemNode::Memory);
1795     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1796     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1797     mem = mem2;
1798   }
1799   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1800     if( mach->in(0) == NULL )
1801       mach->set_req(0, s->_leaf->in(0));
1802   }
1803 
1804   // Now recursively walk the state tree & add operand list.
1805   for( uint i=0; i<2; i++ ) {   // binary tree
1806     State *newstate = s->_kids[i];
1807     if( newstate == NULL ) break;      // Might only have 1 child
1808     // 'op' is what I am expecting to receive
1809     int op;
1810     if( i == 0 ) {
1811       op = _leftOp[rule];
1812     } else {


1855 //     Skip over it ( do nothing )
1856 // (3) Child is an instruction -
1857 //     Call ReduceInst recursively and
1858 //     and instruction as an input to the MachNode
1859 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1860   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1861   State *kid = s->_kids[0];
1862   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1863 
1864   // Leaf?  And not subsumed?
1865   if( kid == NULL && !_swallowed[rule] ) {
1866     mach->add_req( s->_leaf );  // Add leaf pointer
1867     return;                     // Bail out
1868   }
1869 
1870   if( s->_leaf->is_Load() ) {
1871     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1872     mem = s->_leaf->in(MemNode::Memory);
1873     debug_only(_mem_node = s->_leaf;)
1874   }



1875   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1876     if( !mach->in(0) )
1877       mach->set_req(0,s->_leaf->in(0));
1878     else {
1879       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1880     }
1881   }
1882 
1883   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1884     int newrule;
1885     if( i == 0)
1886       newrule = kid->_rule[_leftOp[rule]];
1887     else
1888       newrule = kid->_rule[_rightOp[rule]];
1889 
1890     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1891       // Internal operand; recurse but do nothing else
1892       ReduceOper( kid, newrule, mem, mach );
1893 
1894     } else {                    // Child is a new instruction




  27 #include "opto/addnode.hpp"
  28 #include "opto/callnode.hpp"
  29 #include "opto/connode.hpp"
  30 #include "opto/idealGraphPrinter.hpp"
  31 #include "opto/matcher.hpp"
  32 #include "opto/memnode.hpp"
  33 #include "opto/opcodes.hpp"
  34 #include "opto/regmask.hpp"
  35 #include "opto/rootnode.hpp"
  36 #include "opto/runtime.hpp"
  37 #include "opto/type.hpp"
  38 #include "opto/vectornode.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/os.hpp"
  41 #if defined AD_MD_HPP
  42 # include AD_MD_HPP
  43 #elif defined TARGET_ARCH_MODEL_x86_32
  44 # include "adfiles/ad_x86_32.hpp"
  45 #elif defined TARGET_ARCH_MODEL_x86_64
  46 # include "adfiles/ad_x86_64.hpp"
  47 #elif defined TARGET_ARCH_MODEL_aarch64
  48 # include "adfiles/ad_aarch64.hpp"
  49 #elif defined TARGET_ARCH_MODEL_sparc
  50 # include "adfiles/ad_sparc.hpp"
  51 #elif defined TARGET_ARCH_MODEL_zero
  52 # include "adfiles/ad_zero.hpp"
  53 #elif defined TARGET_ARCH_MODEL_ppc_64
  54 # include "adfiles/ad_ppc_64.hpp"
  55 #endif
  56 #if INCLUDE_ALL_GCS
  57 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
  58 #endif
  59 
  60 OptoReg::Name OptoReg::c_frame_pointer;
  61 
     // Definitions of the Matcher's static register-mask tables:
     // per-ideal-register allowed masks, per-machine-register single-bit
     // masks, the stack-slot-only mask, and the C frame-pointer mask.
  62 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
  63 RegMask Matcher::mreg2regmask[_last_Mach_Reg];
  64 RegMask Matcher::STACK_ONLY_mask;
  65 RegMask Matcher::c_frame_ptr_mask;
     // Rematerializable rule-number bounds supplied by the AD-generated
     // constants _BEGIN_REMATERIALIZE / _END_REMATERIALIZE.
  66 const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
  67 const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;
  68 
  69 //---------------------------Matcher-------------------------------------------
  70 Matcher::Matcher()
  71 : PhaseTransform( Phase::Ins_Select ),
  72 #ifdef ASSERT
  73   _old2new_map(C->comp_arena()),
  74   _new2old_map(C->comp_arena()),
  75 #endif
  76   _shared_nodes(C->comp_arena()),
  77   _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  78   _swallowed(swallowed),


1010     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1011     if (C->failing()) return NULL;
1012     n = mstack.node();          // Leave node on stack
1013     Node_State nstate = mstack.state();
1014     if (nstate == Visit) {
1015       mstack.set_state(Post_Visit);
1016       Node *oldn = n;
1017       // Old-space or new-space check
1018       if (!C->node_arena()->contains(n)) {
1019         // Old space!
1020         Node* m;
1021         if (has_new_node(n)) {  // Not yet Label/Reduced
1022           m = new_node(n);
1023         } else {
1024           if (!is_dontcare(n)) { // Matcher can match this guy
1025             // Calls match special.  They match alone with no children.
1026             // Their children, the incoming arguments, match normally.
1027             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1028             if (C->failing())  return NULL;
1029             if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
1030             if (n->is_MemBar() && UseShenandoahGC) {
1031               m->as_MachMemBar()->set_adr_type(n->adr_type());
1032             }
1033           } else {                  // Nothing the matcher cares about
1034             if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
1035               // Convert to machine-dependent projection
1036               m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
1037 #ifdef ASSERT
1038               _new2old_map.map(m->_idx, n);
1039 #endif
1040               if (m->in(0) != NULL) // m might be top
1041                 collect_null_checks(m, n);
1042             } else {                // Else just a regular 'ol guy
1043               m = n->clone();       // So just clone into new-space
1044 #ifdef ASSERT
1045               _new2old_map.map(m->_idx, n);
1046 #endif
1047               // Def-Use edges will be added incrementally as Uses
1048               // of this node are matched.
1049               assert(m->outcnt() == 0, "no Uses of this clone yet");
1050             }
1051           }
1052 


1057             C->set_node_notes_at(m->_idx, nn);
1058           }
1059           debug_only(match_alias_type(C, n, m));
1060         }
1061         n = m;    // n is now a new-space node
1062         mstack.set_node(n);
1063       }
1064 
1065       // New space!
1066       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1067 
1068       int i;
1069       // Put precedence edges on stack first (match them last).
1070       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1071         Node *m = oldn->in(i);
1072         if (m == NULL) break;
1073         // set -1 to call add_prec() instead of set_req() during Step1
1074         mstack.push(m, Visit, n, -1);
1075       }
1076 
1077       // Handle precedence edges for interior nodes
1078       for (i = n->len()-1; (uint)i >= n->req(); i--) {
1079         Node *m = n->in(i);
1080         if (m == NULL || C->node_arena()->contains(m)) continue;
1081         n->rm_prec(i);
1082         // set -1 to call add_prec() instead of set_req() during Step1
1083         mstack.push(m, Visit, n, -1);
1084       }
1085 
1086       // For constant debug info, I'd rather have unmatched constants.
1087       int cnt = n->req();
1088       JVMState* jvms = n->jvms();
1089       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1090 
1091       // Now do only debug info.  Clone constants rather than matching.
1092       // Constants are represented directly in the debug info without
1093       // the need for executable machine instructions.
1094       // Monitor boxes are also represented directly.
1095       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1096         Node *m = n->in(i);          // Get input
1097         int op = m->Opcode();
1098         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1099         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1100             op == Op_ConF || op == Op_ConD || op == Op_ConL
1101             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1102             ) {
1103           m = m->clone();
1104 #ifdef ASSERT
1105           _new2old_map.map(m->_idx, n);


1756 
1757   // PhaseChaitin::fixup_spills will sometimes generate spill code
1758   // via the matcher.  By the time, nodes have been wired into the CFG,
1759   // and any further nodes generated by expand rules will be left hanging
1760   // in space, and will not get emitted as output code.  Catch this.
1761   // Also, catch any new register allocation constraints ("projections")
1762   // generated belatedly during spill code generation.
1763   if (_allocation_started) {
1764     guarantee(ex == mach, "no expand rules during spill generation");
1765     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1766   }
1767 
1768   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1769     // Record the con for sharing
1770     _shared_nodes.map(leaf->_idx, ex);
1771   }
1772 
1773   return ex;
1774 }
1775 
1776 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
       // Copy every non-NULL precedence edge of the ideal node 'n' (the
       // inputs in slots [req, len)) onto the matched MachNode 'mach' as
       // precedence edges, so ordering constraints survive matching.
1777   for (uint i = n->req(); i < n->len(); i++) {
1778     if (n->in(i) != NULL) {
1779       mach->add_prec(n->in(i));
1780     }
1781   }
1782 }
1783 
1784 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
       // Handle a chain rule, where 'mach' derives from a single child of
       // the match state 's' rather than a full matched tree.  The child is
       // either an operand (reduced via ReduceOper) or a whole instruction
       // (reduced via ReduceInst).  'mem' threads the single memory input
       // through the reduction; (Node*)1 is the "no memory yet" sentinel.
1785   // 'op' is what I am expecting to receive
1786   int op = _leftOp[rule];
1787   // Operand type to catch childs result
1788   // This is what my child will give me.
1789   int opnd_class_instance = s->_rule[op];
1790   // Choose between operand class or not.
1791   // This is what I will receive.
1792   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1793   // New rule for child.  Chase operand classes to get the actual rule.
1794   int newrule = s->_rule[catch_op];
1795 
1796   if( newrule < NUM_OPERANDS ) {
1797     // Chain from operand or operand class, may be output of shared node
1798     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1799             "Bad AD file: Instruction chain rule must chain from operand");
1800     // Insert operand into array of operands for this instruction
1801     mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1802 
1803     ReduceOper( s, newrule, mem, mach );
1804   } else {
1805     // Chain from the result of an instruction
1806     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1807     mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
       // The nested instruction gets its own memory sentinel; the debug-only
       // _mem_node is saved and restored so recursion leaves it intact.
1808     Node *mem1 = (Node*)1;
1809     debug_only(Node *save_mem_node = _mem_node;)
1810     mach->add_req( ReduceInst(s, newrule, mem1) );
1811     debug_only(_mem_node = save_mem_node;)
1812   }
1813   return;
1814 }
1815 
1816 
1817 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1818   handle_precedence_edges(s->_leaf, mach);
1819 
1820   if( s->_leaf->is_Load() ) {
1821     Node *mem2 = s->_leaf->in(MemNode::Memory);
1822     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1823     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1824     mem = mem2;
1825   }
1826   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1827     if( mach->in(0) == NULL )
1828       mach->set_req(0, s->_leaf->in(0));
1829   }
1830 
1831   // Now recursively walk the state tree & add operand list.
1832   for( uint i=0; i<2; i++ ) {   // binary tree
1833     State *newstate = s->_kids[i];
1834     if( newstate == NULL ) break;      // Might only have 1 child
1835     // 'op' is what I am expecting to receive
1836     int op;
1837     if( i == 0 ) {
1838       op = _leftOp[rule];
1839     } else {


1882 //     Skip over it ( do nothing )
1883 // (3) Child is an instruction -
1884 //     Call ReduceInst recursively and
1885 //     and instruction as an input to the MachNode
1886 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1887   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1888   State *kid = s->_kids[0];
1889   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1890 
1891   // Leaf?  And not subsumed?
1892   if( kid == NULL && !_swallowed[rule] ) {
1893     mach->add_req( s->_leaf );  // Add leaf pointer
1894     return;                     // Bail out
1895   }
1896 
1897   if( s->_leaf->is_Load() ) {
1898     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1899     mem = s->_leaf->in(MemNode::Memory);
1900     debug_only(_mem_node = s->_leaf;)
1901   }
1902 
1903   handle_precedence_edges(s->_leaf, mach);
1904 
1905   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1906     if( !mach->in(0) )
1907       mach->set_req(0,s->_leaf->in(0));
1908     else {
1909       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1910     }
1911   }
1912 
1913   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1914     int newrule;
1915     if( i == 0)
1916       newrule = kid->_rule[_leftOp[rule]];
1917     else
1918       newrule = kid->_rule[_rightOp[rule]];
1919 
1920     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1921       // Internal operand; recurse but do nothing else
1922       ReduceOper( kid, newrule, mem, mach );
1923 
1924     } else {                    // Child is a new instruction


< prev index next >