< prev index next >

src/share/vm/opto/matcher.cpp

Print this page




  36 #include "opto/runtime.hpp"
  37 #include "opto/type.hpp"
  38 #include "opto/vectornode.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/os.hpp"
  41 #if defined AD_MD_HPP
  42 # include AD_MD_HPP
  43 #elif defined TARGET_ARCH_MODEL_x86_32
  44 # include "adfiles/ad_x86_32.hpp"
  45 #elif defined TARGET_ARCH_MODEL_x86_64
  46 # include "adfiles/ad_x86_64.hpp"
  47 #elif defined TARGET_ARCH_MODEL_aarch64
  48 # include "adfiles/ad_aarch64.hpp"
  49 #elif defined TARGET_ARCH_MODEL_sparc
  50 # include "adfiles/ad_sparc.hpp"
  51 #elif defined TARGET_ARCH_MODEL_zero
  52 # include "adfiles/ad_zero.hpp"
  53 #elif defined TARGET_ARCH_MODEL_ppc_64
  54 # include "adfiles/ad_ppc_64.hpp"
  55 #endif



  56 
  57 OptoReg::Name OptoReg::c_frame_pointer;
  58 
  59 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
  60 RegMask Matcher::mreg2regmask[_last_Mach_Reg];
  61 RegMask Matcher::STACK_ONLY_mask;
  62 RegMask Matcher::c_frame_ptr_mask;
  63 const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
  64 const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;
  65 
  66 //---------------------------Matcher-------------------------------------------
  67 Matcher::Matcher()
  68 : PhaseTransform( Phase::Ins_Select ),
  69 #ifdef ASSERT
  70   _old2new_map(C->comp_arena()),
  71   _new2old_map(C->comp_arena()),
  72 #endif
  73   _shared_nodes(C->comp_arena()),
  74   _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  75   _swallowed(swallowed),


1007     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1008     if (C->failing()) return NULL;
1009     n = mstack.node();          // Leave node on stack
1010     Node_State nstate = mstack.state();
1011     if (nstate == Visit) {
1012       mstack.set_state(Post_Visit);
1013       Node *oldn = n;
1014       // Old-space or new-space check
1015       if (!C->node_arena()->contains(n)) {
1016         // Old space!
1017         Node* m;
1018         if (has_new_node(n)) {  // Not yet Label/Reduced
1019           m = new_node(n);
1020         } else {
1021           if (!is_dontcare(n)) { // Matcher can match this guy
1022             // Calls match special.  They match alone with no children.
1023             // Their children, the incoming arguments, match normally.
1024             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1025             if (C->failing())  return NULL;
1026             if (m == NULL) { Matcher::soft_match_failure(); return NULL; }



1027           } else {                  // Nothing the matcher cares about
1028             if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
1029               // Convert to machine-dependent projection
1030               m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
1031 #ifdef ASSERT
1032               _new2old_map.map(m->_idx, n);
1033 #endif
1034               if (m->in(0) != NULL) // m might be top
1035                 collect_null_checks(m, n);
1036             } else {                // Else just a regular 'ol guy
1037               m = n->clone();       // So just clone into new-space
1038 #ifdef ASSERT
1039               _new2old_map.map(m->_idx, n);
1040 #endif
1041               // Def-Use edges will be added incrementally as Uses
1042               // of this node are matched.
1043               assert(m->outcnt() == 0, "no Uses of this clone yet");
1044             }
1045           }
1046 


1051             C->set_node_notes_at(m->_idx, nn);
1052           }
1053           debug_only(match_alias_type(C, n, m));
1054         }
1055         n = m;    // n is now a new-space node
1056         mstack.set_node(n);
1057       }
1058 
1059       // New space!
1060       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1061 
1062       int i;
1063       // Put precedence edges on stack first (match them last).
1064       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1065         Node *m = oldn->in(i);
1066         if (m == NULL) break;
1067         // set -1 to call add_prec() instead of set_req() during Step1
1068         mstack.push(m, Visit, n, -1);
1069       }
1070 









1071       // For constant debug info, I'd rather have unmatched constants.
1072       int cnt = n->req();
1073       JVMState* jvms = n->jvms();
1074       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1075 
1076       // Now do only debug info.  Clone constants rather than matching.
1077       // Constants are represented directly in the debug info without
1078       // the need for executable machine instructions.
1079       // Monitor boxes are also represented directly.
1080       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1081         Node *m = n->in(i);          // Get input
1082         int op = m->Opcode();
1083         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1084         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1085             op == Op_ConF || op == Op_ConD || op == Op_ConL
1086             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1087             ) {
1088           m = m->clone();
1089 #ifdef ASSERT
1090           _new2old_map.map(m->_idx, n);


1741 
1742   // PhaseChaitin::fixup_spills will sometimes generate spill code
1743   // via the matcher.  By this time, nodes have been wired into the CFG,
1744   // and any further nodes generated by expand rules will be left hanging
1745   // in space, and will not get emitted as output code.  Catch this.
1746   // Also, catch any new register allocation constraints ("projections")
1747   // generated belatedly during spill code generation.
1748   if (_allocation_started) {
1749     guarantee(ex == mach, "no expand rules during spill generation");
1750     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1751   }
1752 
1753   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1754     // Record the con for sharing
1755     _shared_nodes.map(leaf->_idx, ex);
1756   }
1757 
1758   return ex;
1759 }
1760 








1761 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1762   // 'op' is what I am expecting to receive
1763   int op = _leftOp[rule];
1764   // Operand type to catch childs result
1765   // This is what my child will give me.
1766   int opnd_class_instance = s->_rule[op];
1767   // Choose between operand class or not.
1768   // This is what I will receive.
1769   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1770   // New rule for child.  Chase operand classes to get the actual rule.
1771   int newrule = s->_rule[catch_op];
1772 
1773   if( newrule < NUM_OPERANDS ) {
1774     // Chain from operand or operand class, may be output of shared node
1775     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1776             "Bad AD file: Instruction chain rule must chain from operand");
1777     // Insert operand into array of operands for this instruction
1778     mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1779 
1780     ReduceOper( s, newrule, mem, mach );
1781   } else {
1782     // Chain from the result of an instruction
1783     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1784     mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1785     Node *mem1 = (Node*)1;
1786     debug_only(Node *save_mem_node = _mem_node;)
1787     mach->add_req( ReduceInst(s, newrule, mem1) );
1788     debug_only(_mem_node = save_mem_node;)
1789   }
1790   return;
1791 }
1792 
1793 
1794 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {


1795   if( s->_leaf->is_Load() ) {
1796     Node *mem2 = s->_leaf->in(MemNode::Memory);
1797     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1798     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1799     mem = mem2;
1800   }
1801   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1802     if( mach->in(0) == NULL )
1803       mach->set_req(0, s->_leaf->in(0));
1804   }
1805 
1806   // Now recursively walk the state tree & add operand list.
1807   for( uint i=0; i<2; i++ ) {   // binary tree
1808     State *newstate = s->_kids[i];
1809     if( newstate == NULL ) break;      // Might only have 1 child
1810     // 'op' is what I am expecting to receive
1811     int op;
1812     if( i == 0 ) {
1813       op = _leftOp[rule];
1814     } else {


1857 //     Skip over it ( do nothing )
1858 // (3) Child is an instruction -
1859 //     Call ReduceInst recursively and
1860 //     add the instruction as an input to the MachNode
1861 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1862   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1863   State *kid = s->_kids[0];
1864   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1865 
1866   // Leaf?  And not subsumed?
1867   if( kid == NULL && !_swallowed[rule] ) {
1868     mach->add_req( s->_leaf );  // Add leaf pointer
1869     return;                     // Bail out
1870   }
1871 
1872   if( s->_leaf->is_Load() ) {
1873     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1874     mem = s->_leaf->in(MemNode::Memory);
1875     debug_only(_mem_node = s->_leaf;)
1876   }



1877   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1878     if( !mach->in(0) )
1879       mach->set_req(0,s->_leaf->in(0));
1880     else {
1881       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1882     }
1883   }
1884 
1885   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1886     int newrule;
1887     if( i == 0)
1888       newrule = kid->_rule[_leftOp[rule]];
1889     else
1890       newrule = kid->_rule[_rightOp[rule]];
1891 
1892     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1893       // Internal operand; recurse but do nothing else
1894       ReduceOper( kid, newrule, mem, mach );
1895 
1896     } else {                    // Child is a new instruction




  36 #include "opto/runtime.hpp"
  37 #include "opto/type.hpp"
  38 #include "opto/vectornode.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/os.hpp"
  41 #if defined AD_MD_HPP
  42 # include AD_MD_HPP
  43 #elif defined TARGET_ARCH_MODEL_x86_32
  44 # include "adfiles/ad_x86_32.hpp"
  45 #elif defined TARGET_ARCH_MODEL_x86_64
  46 # include "adfiles/ad_x86_64.hpp"
  47 #elif defined TARGET_ARCH_MODEL_aarch64
  48 # include "adfiles/ad_aarch64.hpp"
  49 #elif defined TARGET_ARCH_MODEL_sparc
  50 # include "adfiles/ad_sparc.hpp"
  51 #elif defined TARGET_ARCH_MODEL_zero
  52 # include "adfiles/ad_zero.hpp"
  53 #elif defined TARGET_ARCH_MODEL_ppc_64
  54 # include "adfiles/ad_ppc_64.hpp"
  55 #endif
  56 #if INCLUDE_ALL_GCS
  57 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
  58 #endif
  59 
  60 OptoReg::Name OptoReg::c_frame_pointer;
  61 
  62 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
  63 RegMask Matcher::mreg2regmask[_last_Mach_Reg];
  64 RegMask Matcher::STACK_ONLY_mask;
  65 RegMask Matcher::c_frame_ptr_mask;
  66 const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
  67 const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;
  68 
  69 //---------------------------Matcher-------------------------------------------
  70 Matcher::Matcher()
  71 : PhaseTransform( Phase::Ins_Select ),
  72 #ifdef ASSERT
  73   _old2new_map(C->comp_arena()),
  74   _new2old_map(C->comp_arena()),
  75 #endif
  76   _shared_nodes(C->comp_arena()),
  77   _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  78   _swallowed(swallowed),


1010     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1011     if (C->failing()) return NULL;
1012     n = mstack.node();          // Leave node on stack
1013     Node_State nstate = mstack.state();
1014     if (nstate == Visit) {
1015       mstack.set_state(Post_Visit);
1016       Node *oldn = n;
1017       // Old-space or new-space check
1018       if (!C->node_arena()->contains(n)) {
1019         // Old space!
1020         Node* m;
1021         if (has_new_node(n)) {  // Not yet Label/Reduced
1022           m = new_node(n);
1023         } else {
1024           if (!is_dontcare(n)) { // Matcher can match this guy
1025             // Calls match special.  They match alone with no children.
1026             // Their children, the incoming arguments, match normally.
1027             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1028             if (C->failing())  return NULL;
1029             if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
1030             if (n->is_MemBar() && UseShenandoahGC) {
1031               m->as_MachMemBar()->set_adr_type(n->adr_type());
1032             }
1033           } else {                  // Nothing the matcher cares about
1034             if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
1035               // Convert to machine-dependent projection
1036               m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
1037 #ifdef ASSERT
1038               _new2old_map.map(m->_idx, n);
1039 #endif
1040               if (m->in(0) != NULL) // m might be top
1041                 collect_null_checks(m, n);
1042             } else {                // Else just a regular 'ol guy
1043               m = n->clone();       // So just clone into new-space
1044 #ifdef ASSERT
1045               _new2old_map.map(m->_idx, n);
1046 #endif
1047               // Def-Use edges will be added incrementally as Uses
1048               // of this node are matched.
1049               assert(m->outcnt() == 0, "no Uses of this clone yet");
1050             }
1051           }
1052 


1057             C->set_node_notes_at(m->_idx, nn);
1058           }
1059           debug_only(match_alias_type(C, n, m));
1060         }
1061         n = m;    // n is now a new-space node
1062         mstack.set_node(n);
1063       }
1064 
1065       // New space!
1066       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1067 
1068       int i;
1069       // Put precedence edges on stack first (match them last).
1070       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1071         Node *m = oldn->in(i);
1072         if (m == NULL) break;
1073         // set -1 to call add_prec() instead of set_req() during Step1
1074         mstack.push(m, Visit, n, -1);
1075       }
1076 
1077       // Handle precedence edges for interior nodes
1078       for (i = n->len()-1; (uint)i >= n->req(); i--) {
1079         Node *m = n->in(i);
1080         if (m == NULL || C->node_arena()->contains(m)) continue;
1081         n->rm_prec(i);
1082         // set -1 to call add_prec() instead of set_req() during Step1
1083         mstack.push(m, Visit, n, -1);
1084       }
1085 
1086       // For constant debug info, I'd rather have unmatched constants.
1087       int cnt = n->req();
1088       JVMState* jvms = n->jvms();
1089       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1090 
1091       // Now do only debug info.  Clone constants rather than matching.
1092       // Constants are represented directly in the debug info without
1093       // the need for executable machine instructions.
1094       // Monitor boxes are also represented directly.
1095       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1096         Node *m = n->in(i);          // Get input
1097         int op = m->Opcode();
1098         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1099         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1100             op == Op_ConF || op == Op_ConD || op == Op_ConL
1101             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1102             ) {
1103           m = m->clone();
1104 #ifdef ASSERT
1105           _new2old_map.map(m->_idx, n);


1756 
1757   // PhaseChaitin::fixup_spills will sometimes generate spill code
1758   // via the matcher.  By this time, nodes have been wired into the CFG,
1759   // and any further nodes generated by expand rules will be left hanging
1760   // in space, and will not get emitted as output code.  Catch this.
1761   // Also, catch any new register allocation constraints ("projections")
1762   // generated belatedly during spill code generation.
1763   if (_allocation_started) {
1764     guarantee(ex == mach, "no expand rules during spill generation");
1765     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1766   }
1767 
1768   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1769     // Record the con for sharing
1770     _shared_nodes.map(leaf->_idx, ex);
1771   }
1772 
1773   return ex;
1774 }
1775 
1776 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1777   for (uint i = n->req(); i < n->len(); i++) {
1778     if (n->in(i) != NULL) {
1779       mach->add_prec(n->in(i));
1780     }
1781   }
1782 }
1783 
1784 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1785   // 'op' is what I am expecting to receive
1786   int op = _leftOp[rule];
1787   // Operand type to catch childs result
1788   // This is what my child will give me.
1789   int opnd_class_instance = s->_rule[op];
1790   // Choose between operand class or not.
1791   // This is what I will receive.
1792   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1793   // New rule for child.  Chase operand classes to get the actual rule.
1794   int newrule = s->_rule[catch_op];
1795 
1796   if( newrule < NUM_OPERANDS ) {
1797     // Chain from operand or operand class, may be output of shared node
1798     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1799             "Bad AD file: Instruction chain rule must chain from operand");
1800     // Insert operand into array of operands for this instruction
1801     mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1802 
1803     ReduceOper( s, newrule, mem, mach );
1804   } else {
1805     // Chain from the result of an instruction
1806     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1807     mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1808     Node *mem1 = (Node*)1;
1809     debug_only(Node *save_mem_node = _mem_node;)
1810     mach->add_req( ReduceInst(s, newrule, mem1) );
1811     debug_only(_mem_node = save_mem_node;)
1812   }
1813   return;
1814 }
1815 
1816 
1817 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1818   handle_precedence_edges(s->_leaf, mach);
1819 
1820   if( s->_leaf->is_Load() ) {
1821     Node *mem2 = s->_leaf->in(MemNode::Memory);
1822     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1823     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1824     mem = mem2;
1825   }
1826   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1827     if( mach->in(0) == NULL )
1828       mach->set_req(0, s->_leaf->in(0));
1829   }
1830 
1831   // Now recursively walk the state tree & add operand list.
1832   for( uint i=0; i<2; i++ ) {   // binary tree
1833     State *newstate = s->_kids[i];
1834     if( newstate == NULL ) break;      // Might only have 1 child
1835     // 'op' is what I am expecting to receive
1836     int op;
1837     if( i == 0 ) {
1838       op = _leftOp[rule];
1839     } else {


1882 //     Skip over it ( do nothing )
1883 // (3) Child is an instruction -
1884 //     Call ReduceInst recursively and
1885 //     add the instruction as an input to the MachNode
1886 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1887   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1888   State *kid = s->_kids[0];
1889   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1890 
1891   // Leaf?  And not subsumed?
1892   if( kid == NULL && !_swallowed[rule] ) {
1893     mach->add_req( s->_leaf );  // Add leaf pointer
1894     return;                     // Bail out
1895   }
1896 
1897   if( s->_leaf->is_Load() ) {
1898     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1899     mem = s->_leaf->in(MemNode::Memory);
1900     debug_only(_mem_node = s->_leaf;)
1901   }
1902 
1903   handle_precedence_edges(s->_leaf, mach);
1904 
1905   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1906     if( !mach->in(0) )
1907       mach->set_req(0,s->_leaf->in(0));
1908     else {
1909       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1910     }
1911   }
1912 
1913   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1914     int newrule;
1915     if( i == 0)
1916       newrule = kid->_rule[_leftOp[rule]];
1917     else
1918       newrule = kid->_rule[_rightOp[rule]];
1919 
1920     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1921       // Internal operand; recurse but do nothing else
1922       ReduceOper( kid, newrule, mem, mach );
1923 
1924     } else {                    // Child is a new instruction


< prev index next >