< prev index next >

src/hotspot/share/opto/chaitin.cpp

Print this page




1651 
1652     } // End of for all instructions
1653 
1654   } // End of for all blocks
1655 }
1656 
1657 // Helper to stretch above; recursively discover the base Node for a
1658 // given derived Node.  Easy for AddP-related machine nodes, but needs
1659 // to be recursive for derived Phis.
1660 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1661   // See if already computed; if so return it
1662   if( derived_base_map[derived->_idx] )
1663     return derived_base_map[derived->_idx];
1664 
1665   // See if this happens to be a base.
1666   // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1667   // pointers derived from NULL!  These are always along paths that
1668   // can't happen at run-time but the optimizer cannot deduce it so
1669   // we have to handle it gracefully.
1670   assert(!derived->bottom_type()->isa_narrowoop() ||
1671           derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1672   const TypePtr *tj = derived->bottom_type()->isa_ptr();
1673   // If its an OOP with a non-zero offset, then it is derived.
1674   if( tj == NULL || tj->_offset == 0 ) {
1675     derived_base_map[derived->_idx] = derived;
1676     return derived;
1677   }
1678   // Derived is NULL+offset?  Base is NULL!
1679   if( derived->is_Con() ) {
1680     Node *base = _matcher.mach_null();
1681     assert(base != NULL, "sanity");
1682     if (base->in(0) == NULL) {
1683       // Initialize it once and make it shared:
1684       // set control to _root and place it into Start block
1685       // (where top() node is placed).
1686       base->init_req(0, _cfg.get_root_node());
1687       Block *startb = _cfg.get_block_for_node(C->top());
1688       uint node_pos = startb->find_node(C->top());
1689       startb->insert_node(base, node_pos);
1690       _cfg.map_node_to_block(base, startb);
1691       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1692 
1693       // The loadConP0 might have projection nodes depending on architecture
1694       // Add the projection nodes to the CFG


1820         // Copies do not define a new value and so do not interfere.
1821         // Remove the copies source from the liveout set before interfering.
1822         uint idx = n->is_Copy();
1823         if (idx) {
1824           liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1825         }
1826       }
1827 
1828       // Found a safepoint?
1829       JVMState *jvms = n->jvms();
1830       if( jvms ) {
1831         // Now scan for a live derived pointer
1832         IndexSetIterator elements(&liveout);
1833         uint neighbor;
1834         while ((neighbor = elements.next()) != 0) {
1835           // Find reaching DEF for base and derived values
1836           // This works because we are still in SSA during this call.
1837           Node *derived = lrgs(neighbor)._def;
1838           const TypePtr *tj = derived->bottom_type()->isa_ptr();
1839           assert(!derived->bottom_type()->isa_narrowoop() ||
1840                   derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1841           // If its an OOP with a non-zero offset, then it is derived.
1842           if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1843             Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1844             assert(base->_idx < _lrg_map.size(), "");
1845             // Add reaching DEFs of derived pointer and base pointer as a
1846             // pair of inputs
1847             n->add_req(derived);
1848             n->add_req(base);
1849 
1850             // See if the base pointer is already live to this point.
1851             // Since I'm working on the SSA form, live-ness amounts to
1852             // reaching def's.  So if I find the base's live range then
1853             // I know the base's def reaches here.
1854             if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1855                  !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1856                  (_lrg_map.live_range_id(base) > 0) && // not a constant
1857                  _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1858               // Base pointer is not currently live.  Since I stretched
1859               // the base pointer to here and it crosses basic-block
1860               // boundaries, the global live info is now incorrect.
1861               // Recompute live.
1862               must_recompute_live = true;


2112   return buf+strlen(buf);
2113 }
2114 
// Debug aid for the graph-coloring register allocator: when WizardMode plus a
// compilation-printing flag is on, list every live range whose assigned
// "register" is at or above LRG::SPILL_REG (i.e. it did not get a real
// register and will be split/spilled on the next allocation round), then dump
// the full allocator state.
2115 void PhaseChaitin::dump_for_spill_split_recycle() const {
2116   if( WizardMode && (PrintCompilation || PrintOpto) ) {
2117     // Display which live ranges need to be split and the allocator's state
2118     tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2119     for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2120       if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2121         tty->print("L%d: ", bidx);
2122         lrgs(bidx).dump();
2123       }
2124     }
2125     tty->cr();
2126     dump();
2127   }
2128 }
2129 
2130 void PhaseChaitin::dump_frame() const {
2131   const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2132   const TypeTuple *domain = C->tf()->domain();
2133   const int        argcnt = domain->cnt() - TypeFunc::Parms;
2134 
2135   // Incoming arguments in registers dump
2136   for( int k = 0; k < argcnt; k++ ) {
2137     OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2138     if( OptoReg::is_reg(parmreg))  {
2139       const char *reg_name = OptoReg::regname(parmreg);
2140       tty->print("#r%3.3d %s", parmreg, reg_name);
2141       parmreg = _matcher._parm_regs[k].second();
2142       if( OptoReg::is_reg(parmreg))  {
2143         tty->print(":%s", OptoReg::regname(parmreg));
2144       }
2145       tty->print("   : parm %d: ", k);
2146       domain->field_at(k + TypeFunc::Parms)->dump();
2147       tty->cr();
2148     }
2149   }
2150 
2151   // Check for un-owned padding above incoming args
2152   OptoReg::Name reg = _matcher._new_SP;
2153   if( reg > _matcher._in_arg_limit ) {
2154     reg = OptoReg::add(reg, -1);
2155     tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2156   }
2157 
2158   // Incoming argument area dump
2159   OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2160   while( reg > begin_in_arg ) {
2161     reg = OptoReg::add(reg, -1);
2162     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2163     int j;
2164     for( j = 0; j < argcnt; j++) {
2165       if( _matcher._parm_regs[j].first() == reg ||
2166           _matcher._parm_regs[j].second() == reg ) {
2167         tty->print("parm %d: ",j);
2168         domain->field_at(j + TypeFunc::Parms)->dump();





2169         tty->cr();
2170         break;
2171       }
2172     }
2173     if( j >= argcnt )
2174       tty->print_cr("HOLE, owned by SELF");
2175   }
2176 
2177   // Old outgoing preserve area
2178   while( reg > _matcher._old_SP ) {
2179     reg = OptoReg::add(reg, -1);
2180     tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2181   }
2182 
2183   // Old SP
2184   tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2185     reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2186 
2187   // Preserve area dump
2188   int fixed_slots = C->fixed_slots();




1651 
1652     } // End of for all instructions
1653 
1654   } // End of for all blocks
1655 }
1656 
1657 // Helper to stretch above; recursively discover the base Node for a
1658 // given derived Node.  Easy for AddP-related machine nodes, but needs
1659 // to be recursive for derived Phis.
1660 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1661   // See if already computed; if so return it
1662   if( derived_base_map[derived->_idx] )
1663     return derived_base_map[derived->_idx];
1664 
1665   // See if this happens to be a base.
1666   // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1667   // pointers derived from NULL!  These are always along paths that
1668   // can't happen at run-time but the optimizer cannot deduce it so
1669   // we have to handle it gracefully.
1670   assert(!derived->bottom_type()->isa_narrowoop() ||
1671          derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
1672   const TypePtr *tj = derived->bottom_type()->isa_ptr();
1673   // If its an OOP with a non-zero offset, then it is derived.
1674   if (tj == NULL || tj->offset() == 0) {
1675     derived_base_map[derived->_idx] = derived;
1676     return derived;
1677   }
1678   // Derived is NULL+offset?  Base is NULL!
1679   if( derived->is_Con() ) {
1680     Node *base = _matcher.mach_null();
1681     assert(base != NULL, "sanity");
1682     if (base->in(0) == NULL) {
1683       // Initialize it once and make it shared:
1684       // set control to _root and place it into Start block
1685       // (where top() node is placed).
1686       base->init_req(0, _cfg.get_root_node());
1687       Block *startb = _cfg.get_block_for_node(C->top());
1688       uint node_pos = startb->find_node(C->top());
1689       startb->insert_node(base, node_pos);
1690       _cfg.map_node_to_block(base, startb);
1691       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1692 
1693       // The loadConP0 might have projection nodes depending on architecture
1694       // Add the projection nodes to the CFG


1820         // Copies do not define a new value and so do not interfere.
1821         // Remove the copies source from the liveout set before interfering.
1822         uint idx = n->is_Copy();
1823         if (idx) {
1824           liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1825         }
1826       }
1827 
1828       // Found a safepoint?
1829       JVMState *jvms = n->jvms();
1830       if( jvms ) {
1831         // Now scan for a live derived pointer
1832         IndexSetIterator elements(&liveout);
1833         uint neighbor;
1834         while ((neighbor = elements.next()) != 0) {
1835           // Find reaching DEF for base and derived values
1836           // This works because we are still in SSA during this call.
1837           Node *derived = lrgs(neighbor)._def;
1838           const TypePtr *tj = derived->bottom_type()->isa_ptr();
1839           assert(!derived->bottom_type()->isa_narrowoop() ||
1840                  derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
1841           // If its an OOP with a non-zero offset, then it is derived.
1842           if (tj && tj->offset() != 0 && tj->isa_oop_ptr()) {
1843             Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1844             assert(base->_idx < _lrg_map.size(), "");
1845             // Add reaching DEFs of derived pointer and base pointer as a
1846             // pair of inputs
1847             n->add_req(derived);
1848             n->add_req(base);
1849 
1850             // See if the base pointer is already live to this point.
1851             // Since I'm working on the SSA form, live-ness amounts to
1852             // reaching def's.  So if I find the base's live range then
1853             // I know the base's def reaches here.
1854             if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1855                  !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1856                  (_lrg_map.live_range_id(base) > 0) && // not a constant
1857                  _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1858               // Base pointer is not currently live.  Since I stretched
1859               // the base pointer to here and it crosses basic-block
1860               // boundaries, the global live info is now incorrect.
1861               // Recompute live.
1862               must_recompute_live = true;


2112   return buf+strlen(buf);
2113 }
2114 
// Debug aid for the graph-coloring register allocator: when WizardMode plus a
// compilation-printing flag is on, list every live range whose assigned
// "register" is at or above LRG::SPILL_REG (i.e. it did not get a real
// register and will be split/spilled on the next allocation round), then dump
// the full allocator state.
2115 void PhaseChaitin::dump_for_spill_split_recycle() const {
2116   if( WizardMode && (PrintCompilation || PrintOpto) ) {
2117     // Display which live ranges need to be split and the allocator's state
2118     tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2119     for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2120       if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2121         tty->print("L%d: ", bidx);
2122         lrgs(bidx).dump();
2123       }
2124     }
2125     tty->cr();
2126     dump();
2127   }
2128 }
2129 
2130 void PhaseChaitin::dump_frame() const {
2131   const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2132   const TypeTuple *domain = C->tf()->domain_cc();
2133   const int        argcnt = domain->cnt() - TypeFunc::Parms;
2134 
2135   // Incoming arguments in registers dump
2136   for( int k = 0; k < argcnt; k++ ) {
2137     OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2138     if( OptoReg::is_reg(parmreg))  {
2139       const char *reg_name = OptoReg::regname(parmreg);
2140       tty->print("#r%3.3d %s", parmreg, reg_name);
2141       parmreg = _matcher._parm_regs[k].second();
2142       if( OptoReg::is_reg(parmreg))  {
2143         tty->print(":%s", OptoReg::regname(parmreg));
2144       }
2145       tty->print("   : parm %d: ", k);
2146       domain->field_at(k + TypeFunc::Parms)->dump();
2147       tty->cr();
2148     }
2149   }
2150 
2151   // Check for un-owned padding above incoming args
2152   OptoReg::Name reg = _matcher._new_SP;
2153   if( reg > _matcher._in_arg_limit ) {
2154     reg = OptoReg::add(reg, -1);
2155     tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2156   }
2157 
2158   // Incoming argument area dump
2159   OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2160   while( reg > begin_in_arg ) {
2161     reg = OptoReg::add(reg, -1);
2162     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2163     int j;
2164     for( j = 0; j < argcnt; j++) {
2165       if( _matcher._parm_regs[j].first() == reg ||
2166           _matcher._parm_regs[j].second() == reg ) {
2167         tty->print("parm %d: ",j);
2168         domain->field_at(j + TypeFunc::Parms)->dump();
2169         if (!C->FIRST_STACK_mask().Member(reg)) {
2170           // Reserved entry in the argument stack area that is not used because
2171           // it may hold the return address (see Matcher::init_first_stack_mask()).
2172           tty->print(" [RESERVED] ");
2173         }
2174         tty->cr();
2175         break;
2176       }
2177     }
2178     if( j >= argcnt )
2179       tty->print_cr("HOLE, owned by SELF");
2180   }
2181 
2182   // Old outgoing preserve area
2183   while( reg > _matcher._old_SP ) {
2184     reg = OptoReg::add(reg, -1);
2185     tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2186   }
2187 
2188   // Old SP
2189   tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2190     reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2191 
2192   // Preserve area dump
2193   int fixed_slots = C->fixed_slots();


< prev index next >