1751
1752 } // End of for all instructions
1753
1754 } // End of for all blocks
1755 }
1756
1757 // Helper to stretch above; recursively discover the base Node for a
1758 // given derived Node. Easy for AddP-related machine nodes, but needs
1759 // to be recursive for derived Phis.
1760 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1761 // See if already computed; if so return it
1762 if( derived_base_map[derived->_idx] )
1763 return derived_base_map[derived->_idx];
1764
1765 // See if this happens to be a base.
1766 // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1767 // pointers derived from NULL! These are always along paths that
1768 // can't happen at run-time but the optimizer cannot deduce it so
1769 // we have to handle it gracefully.
1770 assert(!derived->bottom_type()->isa_narrowoop() ||
1771 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1772 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1773 // If its an OOP with a non-zero offset, then it is derived.
1774 if( tj == NULL || tj->_offset == 0 ) {
1775 derived_base_map[derived->_idx] = derived;
1776 return derived;
1777 }
1778 // Derived is NULL+offset? Base is NULL!
1779 if( derived->is_Con() ) {
1780 Node *base = _matcher.mach_null();
1781 assert(base != NULL, "sanity");
1782 if (base->in(0) == NULL) {
1783 // Initialize it once and make it shared:
1784 // set control to _root and place it into Start block
1785 // (where top() node is placed).
1786 base->init_req(0, _cfg.get_root_node());
1787 Block *startb = _cfg.get_block_for_node(C->top());
1788 uint node_pos = startb->find_node(C->top());
1789 startb->insert_node(base, node_pos);
1790 _cfg.map_node_to_block(base, startb);
1791 assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1792
1793 // The loadConP0 might have projection nodes depending on architecture
1794 // Add the projection nodes to the CFG
1920 // Copies do not define a new value and so do not interfere.
1921 // Remove the copies source from the liveout set before interfering.
1922 uint idx = n->is_Copy();
1923 if (idx) {
1924 liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1925 }
1926 }
1927
1928 // Found a safepoint?
1929 JVMState *jvms = n->jvms();
1930 if (jvms && !liveout.is_empty()) {
1931 // Now scan for a live derived pointer
1932 IndexSetIterator elements(&liveout);
1933 uint neighbor;
1934 while ((neighbor = elements.next()) != 0) {
1935 // Find reaching DEF for base and derived values
1936 // This works because we are still in SSA during this call.
1937 Node *derived = lrgs(neighbor)._def;
1938 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1939 assert(!derived->bottom_type()->isa_narrowoop() ||
1940 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1941 // If its an OOP with a non-zero offset, then it is derived.
1942 if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1943 Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1944 assert(base->_idx < _lrg_map.size(), "");
1945 // Add reaching DEFs of derived pointer and base pointer as a
1946 // pair of inputs
1947 n->add_req(derived);
1948 n->add_req(base);
1949
1950 // See if the base pointer is already live to this point.
1951 // Since I'm working on the SSA form, live-ness amounts to
1952 // reaching def's. So if I find the base's live range then
1953 // I know the base's def reaches here.
1954 if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1955 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1956 (_lrg_map.live_range_id(base) > 0) && // not a constant
1957 _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1958 // Base pointer is not currently live. Since I stretched
1959 // the base pointer to here and it crosses basic-block
1960 // boundaries, the global live info is now incorrect.
1961 // Recompute live.
1962 must_recompute_live = true;
2212 return buf+strlen(buf);
2213 }
2214
// Debug helper: report which live ranges the allocator has decided to
// split/spill in the current graph-coloring iteration, then dump the full
// allocator state. Emits output only when WizardMode is on AND either
// PrintCompilation or PrintOpto is enabled; otherwise it is a no-op.
2215 void PhaseChaitin::dump_for_spill_split_recycle() const {
2216 if( WizardMode && (PrintCompilation || PrintOpto) ) {
2217 // Display which live ranges need to be split and the allocator's state
2218 tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
// Live range ids start at 1; id 0 is reserved.
2219 for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
// Only ranges that are still alive and were assigned a spill slot
// (reg() >= LRG::SPILL_REG marks a spill decision) are of interest.
2220 if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2221 tty->print("L%d: ", bidx);
2222 lrgs(bidx).dump();
2223 }
2224 }
2225 tty->cr();
// Dump the complete allocator state for this iteration.
2226 dump();
2227 }
2228 }
2229
2230 void PhaseChaitin::dump_frame() const {
2231 const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2232 const TypeTuple *domain = C->tf()->domain();
2233 const int argcnt = domain->cnt() - TypeFunc::Parms;
2234
2235 // Incoming arguments in registers dump
2236 for( int k = 0; k < argcnt; k++ ) {
2237 OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2238 if( OptoReg::is_reg(parmreg)) {
2239 const char *reg_name = OptoReg::regname(parmreg);
2240 tty->print("#r%3.3d %s", parmreg, reg_name);
2241 parmreg = _matcher._parm_regs[k].second();
2242 if( OptoReg::is_reg(parmreg)) {
2243 tty->print(":%s", OptoReg::regname(parmreg));
2244 }
2245 tty->print(" : parm %d: ", k);
2246 domain->field_at(k + TypeFunc::Parms)->dump();
2247 tty->cr();
2248 }
2249 }
2250
2251 // Check for un-owned padding above incoming args
2252 OptoReg::Name reg = _matcher._new_SP;
2421 // Check each derived/base pair
2422 for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
2423 Node* check = sfpt->in(idx);
2424 bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
2425 // search upwards through spills and spill phis for AddP
2426 worklist.clear();
2427 worklist.push(check);
2428 uint k = 0;
2429 while (k < worklist.size()) {
2430 check = worklist.at(k);
2431 assert(check, "Bad base or derived pointer");
2432 // See PhaseChaitin::find_base_for_derived() for all cases.
2433 int isc = check->is_Copy();
2434 if (isc) {
2435 worklist.push(check->in(isc));
2436 } else if (check->is_Phi()) {
2437 for (uint m = 1; m < check->req(); m++) {
2438 worklist.push(check->in(m));
2439 }
2440 } else if (check->is_Con()) {
2441 if (is_derived && check->bottom_type()->is_ptr()->_offset != 0) {
2442 // Derived is NULL+non-zero offset, base must be NULL.
2443 assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
2444 } else {
2445 assert(check->bottom_type()->is_ptr()->_offset == 0, "Bad base pointer");
2446 // Base either ConP(NULL) or loadConP
2447 if (check->is_Mach()) {
2448 assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
2449 } else {
2450 assert(check->Opcode() == Op_ConP &&
2451 check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad base pointer");
2452 }
2453 }
2454 } else if (check->bottom_type()->is_ptr()->_offset == 0) {
2455 if (check->is_Proj() || (check->is_Mach() &&
2456 (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
2457 check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
2458 check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
2459 check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
2460 #ifdef _LP64
2461 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
2462 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
2463 (UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
2464 #endif // _LP64
2465 check->as_Mach()->ideal_Opcode() == Op_LoadP ||
2466 check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
2467 // Valid nodes
2468 } else {
2469 check->dump();
2470 assert(false, "Bad base or derived pointer");
2471 }
2472 } else {
2473 assert(is_derived, "Bad base pointer");
2474 assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP, "Bad derived pointer");
|
1751
1752 } // End of for all instructions
1753
1754 } // End of for all blocks
1755 }
1756
1757 // Helper to stretch above; recursively discover the base Node for a
1758 // given derived Node. Easy for AddP-related machine nodes, but needs
1759 // to be recursive for derived Phis.
1760 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1761 // See if already computed; if so return it
1762 if( derived_base_map[derived->_idx] )
1763 return derived_base_map[derived->_idx];
1764
1765 // See if this happens to be a base.
1766 // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1767 // pointers derived from NULL! These are always along paths that
1768 // can't happen at run-time but the optimizer cannot deduce it so
1769 // we have to handle it gracefully.
1770 assert(!derived->bottom_type()->isa_narrowoop() ||
1771 derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
1772 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1773 // If its an OOP with a non-zero offset, then it is derived.
1774 if (tj == NULL || tj->offset() == 0) {
1775 derived_base_map[derived->_idx] = derived;
1776 return derived;
1777 }
1778 // Derived is NULL+offset? Base is NULL!
1779 if( derived->is_Con() ) {
1780 Node *base = _matcher.mach_null();
1781 assert(base != NULL, "sanity");
1782 if (base->in(0) == NULL) {
1783 // Initialize it once and make it shared:
1784 // set control to _root and place it into Start block
1785 // (where top() node is placed).
1786 base->init_req(0, _cfg.get_root_node());
1787 Block *startb = _cfg.get_block_for_node(C->top());
1788 uint node_pos = startb->find_node(C->top());
1789 startb->insert_node(base, node_pos);
1790 _cfg.map_node_to_block(base, startb);
1791 assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1792
1793 // The loadConP0 might have projection nodes depending on architecture
1794 // Add the projection nodes to the CFG
1920 // Copies do not define a new value and so do not interfere.
1921 // Remove the copies source from the liveout set before interfering.
1922 uint idx = n->is_Copy();
1923 if (idx) {
1924 liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1925 }
1926 }
1927
1928 // Found a safepoint?
1929 JVMState *jvms = n->jvms();
1930 if (jvms && !liveout.is_empty()) {
1931 // Now scan for a live derived pointer
1932 IndexSetIterator elements(&liveout);
1933 uint neighbor;
1934 while ((neighbor = elements.next()) != 0) {
1935 // Find reaching DEF for base and derived values
1936 // This works because we are still in SSA during this call.
1937 Node *derived = lrgs(neighbor)._def;
1938 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1939 assert(!derived->bottom_type()->isa_narrowoop() ||
1940 derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
1941 // If its an OOP with a non-zero offset, then it is derived.
1942 if (tj && tj->offset() != 0 && tj->isa_oop_ptr()) {
1943 Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1944 assert(base->_idx < _lrg_map.size(), "");
1945 // Add reaching DEFs of derived pointer and base pointer as a
1946 // pair of inputs
1947 n->add_req(derived);
1948 n->add_req(base);
1949
1950 // See if the base pointer is already live to this point.
1951 // Since I'm working on the SSA form, live-ness amounts to
1952 // reaching def's. So if I find the base's live range then
1953 // I know the base's def reaches here.
1954 if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1955 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1956 (_lrg_map.live_range_id(base) > 0) && // not a constant
1957 _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1958 // Base pointer is not currently live. Since I stretched
1959 // the base pointer to here and it crosses basic-block
1960 // boundaries, the global live info is now incorrect.
1961 // Recompute live.
1962 must_recompute_live = true;
2212 return buf+strlen(buf);
2213 }
2214
// Debug helper: report which live ranges the allocator has decided to
// split/spill in the current graph-coloring iteration, then dump the full
// allocator state. Emits output only when WizardMode is on AND either
// PrintCompilation or PrintOpto is enabled; otherwise it is a no-op.
2215 void PhaseChaitin::dump_for_spill_split_recycle() const {
2216 if( WizardMode && (PrintCompilation || PrintOpto) ) {
2217 // Display which live ranges need to be split and the allocator's state
2218 tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
// Live range ids start at 1; id 0 is reserved.
2219 for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
// Only ranges that are still alive and were assigned a spill slot
// (reg() >= LRG::SPILL_REG marks a spill decision) are of interest.
2220 if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2221 tty->print("L%d: ", bidx);
2222 lrgs(bidx).dump();
2223 }
2224 }
2225 tty->cr();
// Dump the complete allocator state for this iteration.
2226 dump();
2227 }
2228 }
2229
2230 void PhaseChaitin::dump_frame() const {
2231 const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2232 const TypeTuple *domain = C->tf()->domain_cc();
2233 const int argcnt = domain->cnt() - TypeFunc::Parms;
2234
2235 // Incoming arguments in registers dump
2236 for( int k = 0; k < argcnt; k++ ) {
2237 OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2238 if( OptoReg::is_reg(parmreg)) {
2239 const char *reg_name = OptoReg::regname(parmreg);
2240 tty->print("#r%3.3d %s", parmreg, reg_name);
2241 parmreg = _matcher._parm_regs[k].second();
2242 if( OptoReg::is_reg(parmreg)) {
2243 tty->print(":%s", OptoReg::regname(parmreg));
2244 }
2245 tty->print(" : parm %d: ", k);
2246 domain->field_at(k + TypeFunc::Parms)->dump();
2247 tty->cr();
2248 }
2249 }
2250
2251 // Check for un-owned padding above incoming args
2252 OptoReg::Name reg = _matcher._new_SP;
2421 // Check each derived/base pair
2422 for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
2423 Node* check = sfpt->in(idx);
2424 bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
2425 // search upwards through spills and spill phis for AddP
2426 worklist.clear();
2427 worklist.push(check);
2428 uint k = 0;
2429 while (k < worklist.size()) {
2430 check = worklist.at(k);
2431 assert(check, "Bad base or derived pointer");
2432 // See PhaseChaitin::find_base_for_derived() for all cases.
2433 int isc = check->is_Copy();
2434 if (isc) {
2435 worklist.push(check->in(isc));
2436 } else if (check->is_Phi()) {
2437 for (uint m = 1; m < check->req(); m++) {
2438 worklist.push(check->in(m));
2439 }
2440 } else if (check->is_Con()) {
2441 if (is_derived && check->bottom_type()->is_ptr()->offset() != 0) {
2442 // Derived is NULL+non-zero offset, base must be NULL.
2443 assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
2444 } else {
2445 assert(check->bottom_type()->is_ptr()->offset() == 0, "Bad base pointer");
2446 // Base either ConP(NULL) or loadConP
2447 if (check->is_Mach()) {
2448 assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
2449 } else {
2450 assert(check->Opcode() == Op_ConP &&
2451 check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad base pointer");
2452 }
2453 }
2454 } else if (check->bottom_type()->is_ptr()->offset() == 0) {
2455 if (check->is_Proj() || (check->is_Mach() &&
2456 (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
2457 check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
2458 check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
2459 check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
2460 #ifdef _LP64
2461 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
2462 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
2463 (UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
2464 #endif // _LP64
2465 check->as_Mach()->ideal_Opcode() == Op_LoadP ||
2466 check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
2467 // Valid nodes
2468 } else {
2469 check->dump();
2470 assert(false, "Bad base or derived pointer");
2471 }
2472 } else {
2473 assert(is_derived, "Bad base pointer");
2474 assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP, "Bad derived pointer");
|