1764
1765 } // End of for all instructions
1766
1767 } // End of for all blocks
1768 }
1769
1770 // Helper to stretch above; recursively discover the base Node for a
1771 // given derived Node. Easy for AddP-related machine nodes, but needs
1772 // to be recursive for derived Phis.
1773 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1774 // See if already computed; if so return it
1775 if( derived_base_map[derived->_idx] )
1776 return derived_base_map[derived->_idx];
1777
1778 // See if this happens to be a base.
1779 // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1780 // pointers derived from NULL! These are always along paths that
1781 // can't happen at run-time but the optimizer cannot deduce it so
1782 // we have to handle it gracefully.
1783 assert(!derived->bottom_type()->isa_narrowoop() ||
1784 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1785 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1786 // If it's an OOP with a non-zero offset, then it is derived.
1787 if( tj == NULL || tj->_offset == 0 ) {
1788 derived_base_map[derived->_idx] = derived;
1789 return derived;
1790 }
1791 // Derived is NULL+offset? Base is NULL!
1792 if( derived->is_Con() ) {
1793 Node *base = _matcher.mach_null();
1794 assert(base != NULL, "sanity");
1795 if (base->in(0) == NULL) {
1796 // Initialize it once and make it shared:
1797 // set control to _root and place it into Start block
1798 // (where top() node is placed).
1799 base->init_req(0, _cfg.get_root_node());
1800 Block *startb = _cfg.get_block_for_node(C->top());
1801 uint node_pos = startb->find_node(C->top());
1802 startb->insert_node(base, node_pos);
1803 _cfg.map_node_to_block(base, startb);
1804 assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1805
1806 // The loadConP0 might have projection nodes depending on architecture
1807 // Add the projection nodes to the CFG
1933 // Copies do not define a new value and so do not interfere.
1934 // Remove the copy's source from the liveout set before interfering.
1935 uint idx = n->is_Copy();
1936 if (idx) {
1937 liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1938 }
1939 }
1940
1941 // Found a safepoint?
1942 JVMState *jvms = n->jvms();
1943 if (jvms && !liveout.is_empty()) {
1944 // Now scan for a live derived pointer
1945 IndexSetIterator elements(&liveout);
1946 uint neighbor;
1947 while ((neighbor = elements.next()) != 0) {
1948 // Find reaching DEF for base and derived values
1949 // This works because we are still in SSA during this call.
1950 Node *derived = lrgs(neighbor)._def;
1951 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1952 assert(!derived->bottom_type()->isa_narrowoop() ||
1953 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1954 // If it's an OOP with a non-zero offset, then it is derived.
1955 if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1956 Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1957 assert(base->_idx < _lrg_map.size(), "");
1958 // Add reaching DEFs of derived pointer and base pointer as a
1959 // pair of inputs
1960 n->add_req(derived);
1961 n->add_req(base);
1962
1963 // See if the base pointer is already live to this point.
1964 // Since I'm working on the SSA form, live-ness amounts to
1965 // reaching def's. So if I find the base's live range then
1966 // I know the base's def reaches here.
1967 if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1968 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1969 (_lrg_map.live_range_id(base) > 0) && // not a constant
1970 _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1971 // Base pointer is not currently live. Since I stretched
1972 // the base pointer to here and it crosses basic-block
1973 // boundaries, the global live info is now incorrect.
1974 // Recompute live.
1975 must_recompute_live = true;
2225 return buf+strlen(buf);
2226 }
2227
// Debug-only diagnostic: under WizardMode with PrintCompilation or PrintOpto,
// list every live range that the next graph-coloring iteration will split
// (those still alive whose assigned "register" is at or above LRG::SPILL_REG,
// i.e. spill candidates), then dump the full allocator state.
2228 void PhaseChaitin::dump_for_spill_split_recycle() const {
2229 if( WizardMode && (PrintCompilation || PrintOpto) ) {
2230 // Display which live ranges need to be split and the allocator's state
2231 tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2232 for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2233 if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { // reg() >= SPILL_REG marks a spill candidate
2234 tty->print("L%d: ", bidx);
2235 lrgs(bidx).dump();
2236 }
2237 }
2238 tty->cr();
2239 dump(); // full allocator state
2240 }
2241 }
2242
2243 void PhaseChaitin::dump_frame() const {
2244 const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2245 const TypeTuple *domain = C->tf()->domain();
2246 const int argcnt = domain->cnt() - TypeFunc::Parms;
2247
2248 // Incoming arguments in registers dump
2249 for( int k = 0; k < argcnt; k++ ) {
2250 OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2251 if( OptoReg::is_reg(parmreg)) {
2252 const char *reg_name = OptoReg::regname(parmreg);
2253 tty->print("#r%3.3d %s", parmreg, reg_name);
2254 parmreg = _matcher._parm_regs[k].second();
2255 if( OptoReg::is_reg(parmreg)) {
2256 tty->print(":%s", OptoReg::regname(parmreg));
2257 }
2258 tty->print(" : parm %d: ", k);
2259 domain->field_at(k + TypeFunc::Parms)->dump();
2260 tty->cr();
2261 }
2262 }
2263
2264 // Check for un-owned padding above incoming args
2265 OptoReg::Name reg = _matcher._new_SP;
2434 // Check each derived/base pair
2435 for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
2436 Node* check = sfpt->in(idx);
2437 bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
2438 // search upwards through spills and spill phis for AddP
2439 worklist.clear();
2440 worklist.push(check);
2441 uint k = 0;
2442 while (k < worklist.size()) {
2443 check = worklist.at(k);
2444 assert(check, "Bad base or derived pointer");
2445 // See PhaseChaitin::find_base_for_derived() for all cases.
2446 int isc = check->is_Copy();
2447 if (isc) {
2448 worklist.push(check->in(isc));
2449 } else if (check->is_Phi()) {
2450 for (uint m = 1; m < check->req(); m++) {
2451 worklist.push(check->in(m));
2452 }
2453 } else if (check->is_Con()) {
2454 if (is_derived && check->bottom_type()->is_ptr()->_offset != 0) {
2455 // Derived is NULL+non-zero offset, base must be NULL.
2456 assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
2457 } else {
2458 assert(check->bottom_type()->is_ptr()->_offset == 0, "Bad base pointer");
2459 // Base either ConP(NULL) or loadConP
2460 if (check->is_Mach()) {
2461 assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
2462 } else {
2463 assert(check->Opcode() == Op_ConP &&
2464 check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad base pointer");
2465 }
2466 }
2467 } else if (check->bottom_type()->is_ptr()->_offset == 0) {
2468 if (check->is_Proj() || (check->is_Mach() &&
2469 (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
2470 check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
2471 check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
2472 check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
2473 #ifdef _LP64
2474 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
2475 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
2476 (UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
2477 #endif // _LP64
2478 check->as_Mach()->ideal_Opcode() == Op_LoadP ||
2479 check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
2480 // Valid nodes
2481 } else {
2482 check->dump();
2483 assert(false, "Bad base or derived pointer");
2484 }
2485 } else {
2486 assert(is_derived, "Bad base pointer");
2487 assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP, "Bad derived pointer");
|
1764
1765 } // End of for all instructions
1766
1767 } // End of for all blocks
1768 }
1769
1770 // Helper to stretch above; recursively discover the base Node for a
1771 // given derived Node. Easy for AddP-related machine nodes, but needs
1772 // to be recursive for derived Phis.
1773 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1774 // See if already computed; if so return it
1775 if( derived_base_map[derived->_idx] )
1776 return derived_base_map[derived->_idx];
1777
1778 // See if this happens to be a base.
1779 // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1780 // pointers derived from NULL! These are always along paths that
1781 // can't happen at run-time but the optimizer cannot deduce it so
1782 // we have to handle it gracefully.
1783 assert(!derived->bottom_type()->isa_narrowoop() ||
1784 derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
1785 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1786 // If it's an OOP with a non-zero offset, then it is derived.
1787 if (tj == NULL || tj->offset() == 0) {
1788 derived_base_map[derived->_idx] = derived;
1789 return derived;
1790 }
1791 // Derived is NULL+offset? Base is NULL!
1792 if( derived->is_Con() ) {
1793 Node *base = _matcher.mach_null();
1794 assert(base != NULL, "sanity");
1795 if (base->in(0) == NULL) {
1796 // Initialize it once and make it shared:
1797 // set control to _root and place it into Start block
1798 // (where top() node is placed).
1799 base->init_req(0, _cfg.get_root_node());
1800 Block *startb = _cfg.get_block_for_node(C->top());
1801 uint node_pos = startb->find_node(C->top());
1802 startb->insert_node(base, node_pos);
1803 _cfg.map_node_to_block(base, startb);
1804 assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1805
1806 // The loadConP0 might have projection nodes depending on architecture
1807 // Add the projection nodes to the CFG
1933 // Copies do not define a new value and so do not interfere.
1934 // Remove the copy's source from the liveout set before interfering.
1935 uint idx = n->is_Copy();
1936 if (idx) {
1937 liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1938 }
1939 }
1940
1941 // Found a safepoint?
1942 JVMState *jvms = n->jvms();
1943 if (jvms && !liveout.is_empty()) {
1944 // Now scan for a live derived pointer
1945 IndexSetIterator elements(&liveout);
1946 uint neighbor;
1947 while ((neighbor = elements.next()) != 0) {
1948 // Find reaching DEF for base and derived values
1949 // This works because we are still in SSA during this call.
1950 Node *derived = lrgs(neighbor)._def;
1951 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1952 assert(!derived->bottom_type()->isa_narrowoop() ||
1953 derived->bottom_type()->make_ptr()->is_ptr()->offset() == 0, "sanity");
1954 // If it's an OOP with a non-zero offset, then it is derived.
1955 if (tj && tj->offset() != 0 && tj->isa_oop_ptr()) {
1956 Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1957 assert(base->_idx < _lrg_map.size(), "");
1958 // Add reaching DEFs of derived pointer and base pointer as a
1959 // pair of inputs
1960 n->add_req(derived);
1961 n->add_req(base);
1962
1963 // See if the base pointer is already live to this point.
1964 // Since I'm working on the SSA form, live-ness amounts to
1965 // reaching def's. So if I find the base's live range then
1966 // I know the base's def reaches here.
1967 if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1968 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1969 (_lrg_map.live_range_id(base) > 0) && // not a constant
1970 _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1971 // Base pointer is not currently live. Since I stretched
1972 // the base pointer to here and it crosses basic-block
1973 // boundaries, the global live info is now incorrect.
1974 // Recompute live.
1975 must_recompute_live = true;
2225 return buf+strlen(buf);
2226 }
2227
// Debug-only diagnostic: under WizardMode with PrintCompilation or PrintOpto,
// list every live range that the next graph-coloring iteration will split
// (those still alive whose assigned "register" is at or above LRG::SPILL_REG,
// i.e. spill candidates), then dump the full allocator state.
2228 void PhaseChaitin::dump_for_spill_split_recycle() const {
2229 if( WizardMode && (PrintCompilation || PrintOpto) ) {
2230 // Display which live ranges need to be split and the allocator's state
2231 tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2232 for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2233 if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { // reg() >= SPILL_REG marks a spill candidate
2234 tty->print("L%d: ", bidx);
2235 lrgs(bidx).dump();
2236 }
2237 }
2238 tty->cr();
2239 dump(); // full allocator state
2240 }
2241 }
2242
2243 void PhaseChaitin::dump_frame() const {
2244 const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2245 const TypeTuple *domain = C->tf()->domain_cc();
2246 const int argcnt = domain->cnt() - TypeFunc::Parms;
2247
2248 // Incoming arguments in registers dump
2249 for( int k = 0; k < argcnt; k++ ) {
2250 OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2251 if( OptoReg::is_reg(parmreg)) {
2252 const char *reg_name = OptoReg::regname(parmreg);
2253 tty->print("#r%3.3d %s", parmreg, reg_name);
2254 parmreg = _matcher._parm_regs[k].second();
2255 if( OptoReg::is_reg(parmreg)) {
2256 tty->print(":%s", OptoReg::regname(parmreg));
2257 }
2258 tty->print(" : parm %d: ", k);
2259 domain->field_at(k + TypeFunc::Parms)->dump();
2260 tty->cr();
2261 }
2262 }
2263
2264 // Check for un-owned padding above incoming args
2265 OptoReg::Name reg = _matcher._new_SP;
2434 // Check each derived/base pair
2435 for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
2436 Node* check = sfpt->in(idx);
2437 bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
2438 // search upwards through spills and spill phis for AddP
2439 worklist.clear();
2440 worklist.push(check);
2441 uint k = 0;
2442 while (k < worklist.size()) {
2443 check = worklist.at(k);
2444 assert(check, "Bad base or derived pointer");
2445 // See PhaseChaitin::find_base_for_derived() for all cases.
2446 int isc = check->is_Copy();
2447 if (isc) {
2448 worklist.push(check->in(isc));
2449 } else if (check->is_Phi()) {
2450 for (uint m = 1; m < check->req(); m++) {
2451 worklist.push(check->in(m));
2452 }
2453 } else if (check->is_Con()) {
2454 if (is_derived && check->bottom_type()->is_ptr()->offset() != 0) {
2455 // Derived is NULL+non-zero offset, base must be NULL.
2456 assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
2457 } else {
2458 assert(check->bottom_type()->is_ptr()->offset() == 0, "Bad base pointer");
2459 // Base either ConP(NULL) or loadConP
2460 if (check->is_Mach()) {
2461 assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
2462 } else {
2463 assert(check->Opcode() == Op_ConP &&
2464 check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad base pointer");
2465 }
2466 }
2467 } else if (check->bottom_type()->is_ptr()->offset() == 0) {
2468 if (check->is_Proj() || (check->is_Mach() &&
2469 (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
2470 check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
2471 check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
2472 check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
2473 #ifdef _LP64
2474 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
2475 (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
2476 (UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
2477 #endif // _LP64
2478 check->as_Mach()->ideal_Opcode() == Op_LoadP ||
2479 check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
2480 // Valid nodes
2481 } else {
2482 check->dump();
2483 assert(false, "Bad base or derived pointer");
2484 }
2485 } else {
2486 assert(is_derived, "Bad base pointer");
2487 assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP, "Bad derived pointer");
|