< prev index next >

src/hotspot/share/opto/lcm.cpp

Print this page




 768 // carry lots of stuff live across a call.
 769 void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
 770   // Find the next control-defining Node in this block
 771   Node* call = NULL;
 772   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
 773     Node* m = this_call->fast_out(i);
 774     if (get_block_for_node(m) == block && // Local-block user
 775         m != this_call &&       // Not self-start node
 776         m->is_MachCall()) {
 777       call = m;
 778       break;
 779     }
 780   }
 781   if (call == NULL)  return;    // No next call (e.g., block end is near)
 782   // Set next-call for all inputs to this call
 783   set_next_call(block, call, next_call);
 784 }
 785 
 786 //------------------------------add_call_kills-------------------------------------
 787 // helper function that adds caller save registers to MachProjNode
 788 static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
 789   // Fill in the kill mask for the call
     // Walk every machine register.  'regs' holds the registers the call
     // itself already defines, so those are skipped below.
 790   for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {



 791     if( !regs.Member(r) ) {     // Not already defined by the call
 792       // Save-on-call register?
     // save_policy is indexed by register number: 'C' and 'A' registers
     // are unconditionally added to the kill set; 'E' (save-on-entry,
     // i.e. SOE) registers are killed only when the caller requested it
     // via exclude_soe.
 793       if ((save_policy[r] == 'C') ||
 794           (save_policy[r] == 'A') ||
 795           ((save_policy[r] == 'E') && exclude_soe)) {
 796         proj->_rout.Insert(r);
 797       }
 798     }
 799   }
 800 }
 801 
 802 
 803 //------------------------------sched_call-------------------------------------
 804 uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
 805   RegMask regs;
 806 
 807   // Schedule all the users of the call right now.  All the users are
 808   // projection Nodes, so they must be scheduled next to the call.
 809   // Collect all the defined registers.
 810   for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {


 871   // When using CallRuntime mark SOE registers as killed by the call
 872   // so values that could show up in the RegisterMap aren't live in a
 873   // callee saved register since the register wouldn't know where to
 874   // find them.  CallLeaf and CallLeafNoFP are ok because they can't
 875   // have debug info on them.  Strictly speaking this only needs to be
 876   // done for oops since idealreg2debugmask takes care of debug info
 877   // references but there no way to handle oops differently than other
 878   // pointers as far as the kill mask goes.
 879   bool exclude_soe = op == Op_CallRuntime;
 880 
 881   // If the call is a MethodHandle invoke, we need to exclude the
 882   // register which is used to save the SP value over MH invokes from
 883   // the mask.  Otherwise this register could be used for
 884   // deoptimization information.
 885   if (op == Op_CallStaticJava) {
 886     MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
 887     if (mcallstaticjava->_method_handle_invoke)
 888       proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
 889   }
 890 
 891   add_call_kills(proj, regs, save_policy, exclude_soe);
 892 
 893   return node_cnt;
 894 }
 895 
 896 
 897 //------------------------------schedule_local---------------------------------
 898 // Topological sort within a block.  Someday become a real scheduler.
 899 bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call, intptr_t *recalc_pressure_nodes) {
 900   // Already "sorted" are the block start Node (as the first entry), and
 901   // the block-ending Node and any trailing control projections.  We leave
 902   // these alone.  PhiNodes and ParmNodes are made to follow the block start
 903   // Node.  Everything else gets topo-sorted.
 904 
 905 #ifndef PRODUCT
 906     if (trace_opto_pipelining()) {
 907       tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
 908       for (uint i = 0;i < block->number_of_nodes(); i++) {
 909         tty->print("# ");
 910         block->get_node(i)->fast_dump();
 911       }


1112         tty->cr();
1113       }
1114     }
1115 
1116 #endif
1117     if( n->is_MachCall() ) {
1118       MachCallNode *mcall = n->as_MachCall();
1119       phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
1120       continue;
1121     }
1122 
1123     if (n->is_Mach() && n->as_Mach()->has_call()) {
1124       RegMask regs;
1125       regs.Insert(_matcher.c_frame_pointer());
1126       regs.OR(n->out_RegMask());
1127 
1128       MachProjNode *proj = new MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
1129       map_node_to_block(proj, block);
1130       block->insert_node(proj, phi_cnt++);
1131 
1132       add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
1133     }
1134 
1135     // Children are now all ready
1136     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
1137       Node* m = n->fast_out(i5); // Get user
1138       if (get_block_for_node(m) != block) {
1139         continue;
1140       }
1141       if( m->is_Phi() ) continue;
1142       if (m->_idx >= max_idx) { // new node, skip it
1143         assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
1144         continue;
1145       }
1146       int m_cnt = ready_cnt.at(m->_idx) - 1;
1147       ready_cnt.at_put(m->_idx, m_cnt);
1148       if( m_cnt == 0 )
1149         worklist.push(m);
1150     }
1151   }
1152 




 768 // carry lots of stuff live across a call.
 769 void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
 770   // Find the next control-defining Node in this block
     // Scan this_call's users for another MachCall scheduled in this same
     // block; only a local, distinct MachCall user qualifies.
 771   Node* call = NULL;
 772   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
 773     Node* m = this_call->fast_out(i);
 774     if (get_block_for_node(m) == block && // Local-block user
 775         m != this_call &&       // Not self-start node
 776         m->is_MachCall()) {
 777       call = m;
 778       break;
 779     }
 780   }
 781   if (call == NULL)  return;    // No next call (e.g., block end is near)
 782   // Set next-call for all inputs to this call
     // Mark everything feeding the upcoming call as needed across it.
 783   set_next_call(block, call, next_call);
 784 }
 785 
 786 //------------------------------add_call_kills-------------------------------------
 787 // helper function that adds caller save registers to MachProjNode
 788 static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe, bool exclude_fp) {
 789   // Fill in the kill mask for the call
 790   for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
 791     if (exclude_fp && (register_save_type[r] == Op_RegF || register_save_type[r] == Op_RegD)) {
 792       continue;
 793     }
 794     if( !regs.Member(r) ) {     // Not already defined by the call
 795       // Save-on-call register?
 796       if ((save_policy[r] == 'C') ||
 797           (save_policy[r] == 'A') ||
 798           ((save_policy[r] == 'E') && exclude_soe)) {
 799         proj->_rout.Insert(r);
 800       }
 801     }
 802   }
 803 }
 804 
 805 
 806 //------------------------------sched_call-------------------------------------
 807 uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
 808   RegMask regs;
 809 
 810   // Schedule all the users of the call right now.  All the users are
 811   // projection Nodes, so they must be scheduled next to the call.
 812   // Collect all the defined registers.
 813   for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {


 874   // When using CallRuntime mark SOE registers as killed by the call
 875   // so values that could show up in the RegisterMap aren't live in a
 876   // callee saved register since the register wouldn't know where to
 877   // find them.  CallLeaf and CallLeafNoFP are ok because they can't
 878   // have debug info on them.  Strictly speaking this only needs to be
 879   // done for oops since idealreg2debugmask takes care of debug info
 880   // references but there no way to handle oops differently than other
 881   // pointers as far as the kill mask goes.
 882   bool exclude_soe = op == Op_CallRuntime;
 883 
 884   // If the call is a MethodHandle invoke, we need to exclude the
 885   // register which is used to save the SP value over MH invokes from
 886   // the mask.  Otherwise this register could be used for
 887   // deoptimization information.
 888   if (op == Op_CallStaticJava) {
 889     MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
 890     if (mcallstaticjava->_method_handle_invoke)
 891       proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
 892   }
 893 
 894   add_call_kills(proj, regs, save_policy, exclude_soe, false);
 895 
 896   return node_cnt;
 897 }
 898 
 899 
 900 //------------------------------schedule_local---------------------------------
 901 // Topological sort within a block.  Someday become a real scheduler.
 902 bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call, intptr_t *recalc_pressure_nodes) {
 903   // Already "sorted" are the block start Node (as the first entry), and
 904   // the block-ending Node and any trailing control projections.  We leave
 905   // these alone.  PhiNodes and ParmNodes are made to follow the block start
 906   // Node.  Everything else gets topo-sorted.
 907 
 908 #ifndef PRODUCT
 909     if (trace_opto_pipelining()) {
 910       tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
 911       for (uint i = 0;i < block->number_of_nodes(); i++) {
 912         tty->print("# ");
 913         block->get_node(i)->fast_dump();
 914       }


1115         tty->cr();
1116       }
1117     }
1118 
1119 #endif
1120     if( n->is_MachCall() ) {
1121       MachCallNode *mcall = n->as_MachCall();
1122       phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
1123       continue;
1124     }
1125 
1126     if (n->is_Mach() && n->as_Mach()->has_call()) {
1127       RegMask regs;
1128       regs.Insert(_matcher.c_frame_pointer());
1129       regs.OR(n->out_RegMask());
1130 
1131       MachProjNode *proj = new MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
1132       map_node_to_block(proj, block);
1133       block->insert_node(proj, phi_cnt++);
1134 
1135       add_call_kills(proj, regs, _matcher._c_reg_save_policy, false, false);
1136     }
1137 
1138     // Children are now all ready
1139     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
1140       Node* m = n->fast_out(i5); // Get user
1141       if (get_block_for_node(m) != block) {
1142         continue;
1143       }
1144       if( m->is_Phi() ) continue;
1145       if (m->_idx >= max_idx) { // new node, skip it
1146         assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
1147         continue;
1148       }
1149       int m_cnt = ready_cnt.at(m->_idx) - 1;
1150       ready_cnt.at_put(m->_idx, m_cnt);
1151       if( m_cnt == 0 )
1152         worklist.push(m);
1153     }
1154   }
1155 


< prev index next >