< prev index next >

src/hotspot/share/opto/compile.cpp

Print this page




  65 #include "opto/phaseX.hpp"
  66 #include "opto/rootnode.hpp"
  67 #include "opto/runtime.hpp"
  68 #include "opto/stringopts.hpp"
  69 #include "opto/type.hpp"
  70 #include "opto/vectornode.hpp"
  71 #include "runtime/arguments.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/timer.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/macros.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1ThreadLocalData.hpp"
  81 #endif // INCLUDE_G1GC
  82 #if INCLUDE_ZGC
  83 #include "gc/z/c2/zBarrierSetC2.hpp"
  84 #endif



  85 
  86 
  87 // -------------------- Compile::mach_constant_base_node -----------------------
  88 // Constant table base node singleton.
  89 MachConstantBaseNode* Compile::mach_constant_base_node() {
  90   if (_mach_constant_base_node == NULL) {  // lazily create on first request
  91     _mach_constant_base_node = new MachConstantBaseNode();
  92     _mach_constant_base_node->add_req(C->root());  // anchor to root so the node stays reachable in the graph
  93   }
  94   return _mach_constant_base_node;  // cached per-compilation singleton
  95 }
  96 
  97 
  98 /// Support for intrinsics.
  99 
 100 // Return the index at which m must be inserted (or already exists).
 101 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
 102 class IntrinsicDescPair {
 103  private:
 104   ciMethod* _m;


2087 }
2088 
2089 // Perform incremental inlining until bound on number of live nodes is reached
2090 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
2091   TracePhase tp("incrementalInline", &timers[_t_incrInline]);
2092 
2093   PhaseGVN* gvn = initial_gvn();
2094 
2095   set_inlining_incrementally(true);
2096   set_inlining_progress(true);
2097   uint low_live_nodes = 0;
2098 
2099   while(inlining_progress() && _late_inlines.length() > 0) {
2100 
2101     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2102       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
2103         TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
2104         // PhaseIdealLoop is expensive so we only try it once we are
2105         // out of live nodes and we only try it again if the previous
2106         // helped get the number of nodes down significantly
2107         PhaseIdealLoop ideal_loop( igvn, false, true );
2108         if (failing())  return;
2109         low_live_nodes = live_nodes();
2110         _major_progress = true;
2111       }
2112 
2113       if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2114         break;
2115       }
2116     }
2117 
2118     inline_incrementally_one(igvn);
2119 
2120     if (failing())  return;
2121 
2122     {
2123       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2124       igvn.optimize();
2125     }
2126 
2127     if (failing())  return;


2138 
2139     if (failing())  return;
2140 
2141     {
2142       TracePhase tp("incrementalInline_pru", &timers[_t_incrInline_pru]);
2143       ResourceMark rm;
2144       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2145     }
2146 
2147     {
2148       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2149       igvn = PhaseIterGVN(gvn);
2150       igvn.optimize();
2151     }
2152   }
2153 
2154   set_inlining_incrementally(false);
2155 }
2156 
2157 















2158 //------------------------------Optimize---------------------------------------
2159 // Given a graph, optimize it.
2160 void Compile::Optimize() {
2161   TracePhase tp("optimizer", &timers[_t_optimizer]);
2162 
2163 #ifndef PRODUCT
2164   if (_directive->BreakAtCompileOption) {
2165     BREAKPOINT;
2166   }
2167 
2168 #endif
2169 
2170 #ifdef ASSERT
2171   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2172   bs->verify_gc_barriers(true);
2173 #endif
2174 
2175   ResourceMark rm;
2176   int          loop_opts_cnt;
2177 
2178   print_inlining_reinit();
2179 
2180   NOT_PRODUCT( verify_graph_edges(); )
2181 
2182   print_method(PHASE_AFTER_PARSING);
2183 
2184  {
2185   // Iterative Global Value Numbering, including ideal transforms
2186   // Initialize IterGVN with types and values from parse-time GVN
2187   PhaseIterGVN igvn(initial_gvn());
2188 #ifdef ASSERT
2189   _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2190 #endif
2191   {
2192     TracePhase tp("iterGVN", &timers[_t_iterGVN]);
2193     igvn.optimize();
2194   }
2195 
2196   print_method(PHASE_ITER_GVN1, 2);
2197 
2198   if (failing())  return;
2199 


2200   inline_incrementally(igvn);
2201 
2202   print_method(PHASE_INCREMENTAL_INLINE, 2);
2203 
2204   if (failing())  return;
2205 
2206   if (eliminate_boxing()) {
2207     // Inline valueOf() methods now.
2208     inline_boxing_calls(igvn);
2209 
2210     if (AlwaysIncrementalInline) {
2211       inline_incrementally(igvn);
2212     }
2213 
2214     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2215 
2216     if (failing())  return;
2217   }
2218 
2219   // Remove the speculative part of types and clean up the graph from


2228 
2229   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2230     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2231     initial_gvn()->replace_with(&igvn);
2232     for_igvn()->clear();
2233     Unique_Node_List new_worklist(C->comp_arena());
2234     {
2235       ResourceMark rm;
2236       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2237     }
2238     set_for_igvn(&new_worklist);
2239     igvn = PhaseIterGVN(initial_gvn());
2240     igvn.optimize();
2241   }
2242 
2243   // Perform escape analysis
2244   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2245     if (has_loops()) {
2246       // Cleanup graph (remove dead nodes).
2247       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2248       PhaseIdealLoop ideal_loop( igvn, false, true );
2249       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2250       if (failing())  return;
2251     }
2252     ConnectionGraph::do_analysis(this, &igvn);
2253 
2254     if (failing())  return;
2255 
2256     // Optimize out fields loads from scalar replaceable allocations.
2257     igvn.optimize();
2258     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2259 
2260     if (failing())  return;
2261 
2262     if (congraph() != NULL && macro_count() > 0) {
2263       TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2264       PhaseMacroExpand mexp(igvn);
2265       mexp.eliminate_macro_nodes();
2266       igvn.set_delay_transform(false);
2267 
2268       igvn.optimize();
2269       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2270 
2271       if (failing())  return;
2272     }
2273   }
2274 
2275   // Loop transforms on the ideal graph.  Range Check Elimination,
2276   // peeling, unrolling, etc.
2277 
2278   // Set loop opts counter
2279   loop_opts_cnt = num_loop_opts();
2280   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2281     {
2282       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2283       PhaseIdealLoop ideal_loop( igvn, true );
2284       loop_opts_cnt--;
2285       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2286       if (failing())  return;
2287     }
2288     // Loop opts pass if partial peeling occurred in previous pass
2289     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
2290       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2291       PhaseIdealLoop ideal_loop( igvn, false );
2292       loop_opts_cnt--;
2293       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2294       if (failing())  return;
2295     }
2296     // Loop opts pass for loop-unrolling before CCP
2297     if(major_progress() && (loop_opts_cnt > 0)) {
2298       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2299       PhaseIdealLoop ideal_loop( igvn, false );
2300       loop_opts_cnt--;
2301       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2302     }
2303     if (!failing()) {
2304       // Verify that last round of loop opts produced a valid graph
2305       TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2306       PhaseIdealLoop::verify(igvn);
2307     }
2308   }
2309   if (failing())  return;
2310 
2311   // Conditional Constant Propagation;
2312   PhaseCCP ccp( &igvn );
2313   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2314   {
2315     TracePhase tp("ccp", &timers[_t_ccp]);
2316     ccp.do_transform();
2317   }
2318   print_method(PHASE_CPP1, 2);
2319 
2320   assert( true, "Break here to ccp.dump_old2new_map()");
2321 
2322   // Iterative Global Value Numbering, including ideal transforms
2323   {
2324     TracePhase tp("iterGVN2", &timers[_t_iterGVN2]);
2325     igvn = ccp;
2326     igvn.optimize();
2327   }
2328 
2329   print_method(PHASE_ITER_GVN2, 2);
2330 
2331   if (failing())  return;
2332 
2333   // Loop transforms on the ideal graph.  Range Check Elimination,
2334   // peeling, unrolling, etc.
2335   if(loop_opts_cnt > 0) {
2336     debug_only( int cnt = 0; );
2337     while(major_progress() && (loop_opts_cnt > 0)) {
2338       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2339       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2340       PhaseIdealLoop ideal_loop( igvn, true);
2341       loop_opts_cnt--;
2342       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2343       if (failing())  return;
2344     }
2345   }
2346 
2347 #if INCLUDE_ZGC
2348   if (UseZGC) {
2349     ZBarrierSetC2::find_dominating_barriers(igvn);
2350   }
2351 #endif
2352 
2353   if (failing())  return;
2354 
2355   // Ensure that major progress is now clear
2356   C->clear_major_progress();
2357 
2358   {
2359     // Verify that all previous optimizations produced a valid graph
2360     // at least to this point, even if no loop optimizations were done.
2361     TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2362     PhaseIdealLoop::verify(igvn);
2363   }
2364 


2366     // No more loop optimizations. Remove all range check dependent CastIINodes.
2367     C->remove_range_check_casts(igvn);
2368     igvn.optimize();
2369   }
2370 
2371 #ifdef ASSERT
2372   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2373   bs->verify_gc_barriers(false);
2374 #endif
2375 
2376   {
2377     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2378     PhaseMacroExpand  mex(igvn);
2379     print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
2380     if (mex.expand_macro_nodes()) {
2381       assert(failing(), "must bail out w/ explicit message");
2382       return;
2383     }
2384   }
2385 









2386   if (opaque4_count() > 0) {
2387     C->remove_opaque4_nodes(igvn);
2388     igvn.optimize();
2389   }
2390 
2391   DEBUG_ONLY( _modified_nodes = NULL; )
2392  } // (End scope of igvn; run destructor if necessary for asserts.)
2393 
2394  process_print_inlining();
2395  // A method with only infinite loops has no edges entering loops from root
2396  {
2397    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2398    if (final_graph_reshaping()) {
2399      assert(failing(), "must bail out w/ explicit message");
2400      return;
2401    }
2402  }
2403 
2404  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2405 }


2806   // case Op_ConvD2L: // handled by leaf call
2807   case Op_ConD:
2808   case Op_CmpD:
2809   case Op_CmpD3:
2810     frc.inc_double_count();
2811     break;
2812   case Op_Opaque1:              // Remove Opaque Nodes before matching
2813   case Op_Opaque2:              // Remove Opaque Nodes before matching
2814   case Op_Opaque3:
2815     n->subsume_by(n->in(1), this);
2816     break;
2817   case Op_CallStaticJava:
2818   case Op_CallJava:
2819   case Op_CallDynamicJava:
2820     frc.inc_java_call_count(); // Count java call site;
2821   case Op_CallRuntime:
2822   case Op_CallLeaf:
2823   case Op_CallLeafNoFP: {
2824     assert (n->is_Call(), "");
2825     CallNode *call = n->as_Call();











2826     // Count call sites where the FP mode bit would have to be flipped.
2827     // Do not count uncommon runtime calls:
2828     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2829     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2830     if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
2831       frc.inc_call_count();   // Count the call site
2832     } else {                  // See if uncommon argument is shared
2833       Node *n = call->in(TypeFunc::Parms);
2834       int nop = n->Opcode();
2835       // Clone shared simple arguments to uncommon calls, item (1).
2836       if (n->outcnt() > 1 &&
2837           !n->is_Proj() &&
2838           nop != Op_CreateEx &&
2839           nop != Op_CheckCastPP &&
2840           nop != Op_DecodeN &&
2841           nop != Op_DecodeNKlass &&
2842           !n->is_Mem() &&
2843           !n->is_Phi()) {
2844         Node *x = n->clone();
2845         call->set_req(TypeFunc::Parms, x);


3370       // register allocation can be confused.
3371       ResourceMark rm;
3372       Unique_Node_List wq;
3373       wq.push(n->in(MemBarNode::Precedent));
3374       n->set_req(MemBarNode::Precedent, top());
3375       while (wq.size() > 0) {
3376         Node* m = wq.pop();
3377         if (m->outcnt() == 0) {
3378           for (uint j = 0; j < m->req(); j++) {
3379             Node* in = m->in(j);
3380             if (in != NULL) {
3381               wq.push(in);
3382             }
3383           }
3384           m->disconnect_inputs(NULL, this);
3385         }
3386       }
3387     }
3388     break;
3389   }






















3390   case Op_RangeCheck: {
3391     RangeCheckNode* rc = n->as_RangeCheck();
3392     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3393     n->subsume_by(iff, this);
3394     frc._tests.push(iff);
3395     break;
3396   }
3397   case Op_ConvI2L: {
3398     if (!Matcher::convi2l_type_required) {
3399       // Code generation on some platforms doesn't need accurate
3400       // ConvI2L types. Widening the type can help remove redundant
3401       // address computations.
3402       n->as_Type()->set_type(TypeLong::INT);
3403       ResourceMark rm;
3404       Node_List wq;
3405       wq.push(n);
3406       for (uint next = 0; next < wq.size(); next++) {
3407         Node *m = wq.at(next);
3408 
3409         for(;;) {


3807           if (use->is_Con())        continue;  // a dead ConNode is OK
3808           // At this point, we have found a dead node which is DU-reachable.
3809           if (!dead_nodes) {
3810             tty->print_cr("*** Dead nodes reachable via DU edges:");
3811             dead_nodes = true;
3812           }
3813           use->dump(2);
3814           tty->print_cr("---");
3815           checked.push(use);  // No repeats; pretend it is now checked.
3816         }
3817       }
3818       assert(!dead_nodes, "using nodes must be reachable from root");
3819     }
3820   }
3821 }
3822 
3823 // Verify GC barriers consistency
3824 // Currently supported:
3825 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
3826 void Compile::verify_barriers() {
3827 #if INCLUDE_G1GC
3828   if (UseG1GC) {
3829     // Verify G1 pre-barriers





3830     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());



3831 
3832     ResourceArea *area = Thread::current()->resource_area();
3833     Unique_Node_List visited(area);
3834     Node_List worklist(area);
3835     // We're going to walk control flow backwards starting from the Root
3836     worklist.push(_root);
3837     while (worklist.size() > 0) {
3838       Node* x = worklist.pop();
3839       if (x == NULL || x == top()) continue;
3840       if (visited.member(x)) {
3841         continue;
3842       } else {
3843         visited.push(x);
3844       }
3845 
3846       if (x->is_Region()) {
3847         for (uint i = 1; i < x->req(); i++) {
3848           worklist.push(x->in(i));
3849         }
3850       } else {


4566 
4567 /**
4568  * Remove the speculative part of types and clean up the graph
4569  */
4570 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
4571   if (UseTypeSpeculation) {
4572     Unique_Node_List worklist;
4573     worklist.push(root());
4574     int modified = 0;
4575     // Go over all type nodes that carry a speculative type, drop the
4576     // speculative part of the type and enqueue the node for an igvn
4577     // which may optimize it out.
4578     for (uint next = 0; next < worklist.size(); ++next) {
4579       Node *n  = worklist.at(next);
4580       if (n->is_Type()) {
4581         TypeNode* tn = n->as_Type();
4582         const Type* t = tn->type();
4583         const Type* t_no_spec = t->remove_speculative();
4584         if (t_no_spec != t) {
4585           bool in_hash = igvn.hash_delete(n);
4586           assert(in_hash, "node should be in igvn hash table");
4587           tn->set_type(t_no_spec);
4588           igvn.hash_insert(n);
4589           igvn._worklist.push(n); // give it a chance to go away
4590           modified++;
4591         }
4592       }
4593       uint max = n->len();
4594       for( uint i = 0; i < max; ++i ) {
4595         Node *m = n->in(i);
4596         if (not_a_node(m))  continue;
4597         worklist.push(m);
4598       }
4599     }
4600     // Drop the speculative part of all types in the igvn's type table
4601     igvn.remove_speculative_types();
4602     if (modified > 0) {
4603       igvn.optimize();
4604     }
4605 #ifdef ASSERT
4606     // Verify that after the IGVN is over no speculative type has resurfaced




  65 #include "opto/phaseX.hpp"
  66 #include "opto/rootnode.hpp"
  67 #include "opto/runtime.hpp"
  68 #include "opto/stringopts.hpp"
  69 #include "opto/type.hpp"
  70 #include "opto/vectornode.hpp"
  71 #include "runtime/arguments.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/timer.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/macros.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1ThreadLocalData.hpp"
  81 #endif // INCLUDE_G1GC
  82 #if INCLUDE_ZGC
  83 #include "gc/z/c2/zBarrierSetC2.hpp"
  84 #endif
  85 #if INCLUDE_SHENANDOAHGC
  86 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  87 #endif
  88 
  89 
  90 // -------------------- Compile::mach_constant_base_node -----------------------
  91 // Constant table base node singleton.
  92 MachConstantBaseNode* Compile::mach_constant_base_node() {
  93   if (_mach_constant_base_node == NULL) {  // lazily create on first request
  94     _mach_constant_base_node = new MachConstantBaseNode();
  95     _mach_constant_base_node->add_req(C->root());  // anchor to root so the node stays reachable in the graph
  96   }
  97   return _mach_constant_base_node;  // cached per-compilation singleton
  98 }
  99 
 100 
 101 /// Support for intrinsics.
 102 
 103 // Return the index at which m must be inserted (or already exists).
 104 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
 105 class IntrinsicDescPair {
 106  private:
 107   ciMethod* _m;


2090 }
2091 
2092 // Perform incremental inlining until bound on number of live nodes is reached
2093 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
2094   TracePhase tp("incrementalInline", &timers[_t_incrInline]);
2095 
2096   PhaseGVN* gvn = initial_gvn();
2097 
2098   set_inlining_incrementally(true);
2099   set_inlining_progress(true);
2100   uint low_live_nodes = 0;
2101 
2102   while(inlining_progress() && _late_inlines.length() > 0) {
2103 
2104     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2105       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
2106         TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
2107         // PhaseIdealLoop is expensive so we only try it once we are
2108         // out of live nodes and we only try it again if the previous
2109         // helped get the number of nodes down significantly
2110         PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
2111         if (failing())  return;
2112         low_live_nodes = live_nodes();
2113         _major_progress = true;
2114       }
2115 
2116       if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2117         break;
2118       }
2119     }
2120 
2121     inline_incrementally_one(igvn);
2122 
2123     if (failing())  return;
2124 
2125     {
2126       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2127       igvn.optimize();
2128     }
2129 
2130     if (failing())  return;


2141 
2142     if (failing())  return;
2143 
2144     {
2145       TracePhase tp("incrementalInline_pru", &timers[_t_incrInline_pru]);
2146       ResourceMark rm;
2147       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2148     }
2149 
2150     {
2151       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2152       igvn = PhaseIterGVN(gvn);
2153       igvn.optimize();
2154     }
2155   }
2156 
2157   set_inlining_incrementally(false);
2158 }
2159 
2160 
// Run ideal-loop optimization passes (range check elimination, peeling,
// unrolling, etc.) in the requested mode, repeating while a pass reports
// major progress and the remaining loop-opts budget (loop_opts_cnt) allows.
// Returns false only when the compilation has failed during a pass; returns
// true otherwise, including when no pass was run at all.
2161 bool Compile::optimize_loops(int& loop_opts_cnt, PhaseIterGVN& igvn, LoopOptsMode mode) {
2162   if(loop_opts_cnt > 0) {
2163     debug_only( int cnt = 0; );  // pass counter, debug builds only
2164     while(major_progress() && (loop_opts_cnt > 0)) {
2165       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2166       assert( cnt++ < 40, "infinite cycle in loop optimization" );  // guard against a never-ending progress loop
2167       PhaseIdealLoop ideal_loop(igvn, mode);
2168       loop_opts_cnt--;  // each pass consumes one unit of the budget
2169       if (failing())  return false;
2170       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2171     }
2172   }
2173   return true;
2174 }
2175 
2176 //------------------------------Optimize---------------------------------------
2177 // Given a graph, optimize it.
2178 void Compile::Optimize() {
2179   TracePhase tp("optimizer", &timers[_t_optimizer]);
2180 
2181 #ifndef PRODUCT
2182   if (_directive->BreakAtCompileOption) {
2183     BREAKPOINT;
2184   }
2185 
2186 #endif
2187 
2188 #ifdef ASSERT
2189   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2190   bs->verify_gc_barriers(true);
2191 #endif
2192 
2193   ResourceMark rm;
2194   int          loop_opts_cnt;
2195 
2196   print_inlining_reinit();
2197 
2198   NOT_PRODUCT( verify_graph_edges(); )
2199 
2200   print_method(PHASE_AFTER_PARSING);
2201 
2202  {
2203   // Iterative Global Value Numbering, including ideal transforms
2204   // Initialize IterGVN with types and values from parse-time GVN
2205   PhaseIterGVN igvn(initial_gvn());
2206 #ifdef ASSERT
2207   _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2208 #endif
2209   {
2210     TracePhase tp("iterGVN", &timers[_t_iterGVN]);
2211     igvn.optimize();
2212   }
2213 


2214   if (failing())  return;
2215 
2216   print_method(PHASE_ITER_GVN1, 2);
2217 
2218   inline_incrementally(igvn);
2219 
2220   print_method(PHASE_INCREMENTAL_INLINE, 2);
2221 
2222   if (failing())  return;
2223 
2224   if (eliminate_boxing()) {
2225     // Inline valueOf() methods now.
2226     inline_boxing_calls(igvn);
2227 
2228     if (AlwaysIncrementalInline) {
2229       inline_incrementally(igvn);
2230     }
2231 
2232     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2233 
2234     if (failing())  return;
2235   }
2236 
2237   // Remove the speculative part of types and clean up the graph from


2246 
2247   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2248     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2249     initial_gvn()->replace_with(&igvn);
2250     for_igvn()->clear();
2251     Unique_Node_List new_worklist(C->comp_arena());
2252     {
2253       ResourceMark rm;
2254       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2255     }
2256     set_for_igvn(&new_worklist);
2257     igvn = PhaseIterGVN(initial_gvn());
2258     igvn.optimize();
2259   }
2260 
2261   // Perform escape analysis
2262   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2263     if (has_loops()) {
2264       // Cleanup graph (remove dead nodes).
2265       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2266       PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
2267       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2268       if (failing())  return;
2269     }
2270     ConnectionGraph::do_analysis(this, &igvn);
2271 
2272     if (failing())  return;
2273 
2274     // Optimize out fields loads from scalar replaceable allocations.
2275     igvn.optimize();
2276     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2277 
2278     if (failing())  return;
2279 
2280     if (congraph() != NULL && macro_count() > 0) {
2281       TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2282       PhaseMacroExpand mexp(igvn);
2283       mexp.eliminate_macro_nodes();
2284       igvn.set_delay_transform(false);
2285 
2286       igvn.optimize();
2287       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2288 
2289       if (failing())  return;
2290     }
2291   }
2292 
2293   // Loop transforms on the ideal graph.  Range Check Elimination,
2294   // peeling, unrolling, etc.
2295 
2296   // Set loop opts counter
2297   loop_opts_cnt = num_loop_opts();
2298   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2299     {
2300       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2301       PhaseIdealLoop ideal_loop(igvn, LoopOptsDefault);
2302       loop_opts_cnt--;
2303       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2304       if (failing())  return;
2305     }
2306     // Loop opts pass if partial peeling occurred in previous pass
2307     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
2308       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2309       PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
2310       loop_opts_cnt--;
2311       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2312       if (failing())  return;
2313     }
2314     // Loop opts pass for loop-unrolling before CCP
2315     if(major_progress() && (loop_opts_cnt > 0)) {
2316       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2317       PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
2318       loop_opts_cnt--;
2319       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2320     }
2321     if (!failing()) {
2322       // Verify that last round of loop opts produced a valid graph
2323       TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2324       PhaseIdealLoop::verify(igvn);
2325     }
2326   }
2327   if (failing())  return;
2328 
2329   // Conditional Constant Propagation;
2330   PhaseCCP ccp( &igvn );
2331   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2332   {
2333     TracePhase tp("ccp", &timers[_t_ccp]);
2334     ccp.do_transform();
2335   }
2336   print_method(PHASE_CPP1, 2);
2337 
2338   assert( true, "Break here to ccp.dump_old2new_map()");
2339 
2340   // Iterative Global Value Numbering, including ideal transforms
2341   {
2342     TracePhase tp("iterGVN2", &timers[_t_iterGVN2]);
2343     igvn = ccp;
2344     igvn.optimize();
2345   }
2346 
2347   print_method(PHASE_ITER_GVN2, 2);
2348 
2349   if (failing())  return;
2350 
2351   // Loop transforms on the ideal graph.  Range Check Elimination,
2352   // peeling, unrolling, etc.
2353   if (!optimize_loops(loop_opts_cnt, igvn, LoopOptsDefault)) {
2354     return;








2355   }
2356 
2357 #if INCLUDE_ZGC
2358   if (UseZGC) {
2359     ZBarrierSetC2::find_dominating_barriers(igvn);
2360   }
2361 #endif
2362 
2363   if (failing())  return;
2364 
2365   // Ensure that major progress is now clear
2366   C->clear_major_progress();
2367 
2368   {
2369     // Verify that all previous optimizations produced a valid graph
2370     // at least to this point, even if no loop optimizations were done.
2371     TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2372     PhaseIdealLoop::verify(igvn);
2373   }
2374 


2376     // No more loop optimizations. Remove all range check dependent CastIINodes.
2377     C->remove_range_check_casts(igvn);
2378     igvn.optimize();
2379   }
2380 
2381 #ifdef ASSERT
2382   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2383   bs->verify_gc_barriers(false);
2384 #endif
2385 
2386   {
2387     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2388     PhaseMacroExpand  mex(igvn);
2389     print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
2390     if (mex.expand_macro_nodes()) {
2391       assert(failing(), "must bail out w/ explicit message");
2392       return;
2393     }
2394   }
2395 
2396   print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);
2397 
2398 #if INCLUDE_SHENANDOAHGC
2399   if (UseShenandoahGC && ((ShenandoahBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2())->expand_barriers(this, igvn)) {
2400     assert(failing(), "must bail out w/ explicit message");
2401     return;
2402   }
2403 #endif
2404 
2405   if (opaque4_count() > 0) {
2406     C->remove_opaque4_nodes(igvn);
2407     igvn.optimize();
2408   }
2409 
2410   DEBUG_ONLY( _modified_nodes = NULL; )
2411  } // (End scope of igvn; run destructor if necessary for asserts.)
2412 
2413  process_print_inlining();
2414  // A method with only infinite loops has no edges entering loops from root
2415  {
2416    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2417    if (final_graph_reshaping()) {
2418      assert(failing(), "must bail out w/ explicit message");
2419      return;
2420    }
2421  }
2422 
2423  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2424 }


2825   // case Op_ConvD2L: // handled by leaf call
2826   case Op_ConD:
2827   case Op_CmpD:
2828   case Op_CmpD3:
2829     frc.inc_double_count();
2830     break;
2831   case Op_Opaque1:              // Remove Opaque Nodes before matching
2832   case Op_Opaque2:              // Remove Opaque Nodes before matching
2833   case Op_Opaque3:
       // Opaque nodes only exist to hide a value from earlier optimization
       // phases; strip them now so the matcher sees the real input.
2834     n->subsume_by(n->in(1), this);
2835     break;
2836   case Op_CallStaticJava:
2837   case Op_CallJava:
2838   case Op_CallDynamicJava:
2839     frc.inc_java_call_count(); // Count java call site;
       // Intentional fall-through: Java calls share the runtime/leaf call
       // processing below.
2840   case Op_CallRuntime:
2841   case Op_CallLeaf:
2842   case Op_CallLeafNoFP: {
2843     assert (n->is_Call(), "");
2844     CallNode *call = n->as_Call();
2845 #if INCLUDE_SHENANDOAHGC
       // A Shenandoah SATB pre-barrier call may carry one extra input (an
       // address node) beyond its declared call signature; it was only kept
       // to hold the address computation alive.  Drop it here so the call
       // matches its declared TypeFunc before matching.
2846     if (UseShenandoahGC && ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
2847       uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
2848       if (call->req() > cnt) {
2849         assert(call->req() == cnt+1, "only one extra input");
2850         Node* addp = call->in(cnt);
2851         assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
2852         call->del_req(cnt);
2853       }
2854     }
2855 #endif
2856     // Count call sites where the FP mode bit would have to be flipped.
2857     // Do not count uncommon runtime calls:
2858     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2859     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2860     if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
2861       frc.inc_call_count();   // Count the call site
2862     } else {                  // See if uncommon argument is shared
2863       Node *n = call->in(TypeFunc::Parms);
2864       int nop = n->Opcode();
2865       // Clone shared simple arguments to uncommon calls, item (1).
       // Cloning is only done for cheap, side-effect-free nodes: skip
       // projections, exception creation, casts, memory ops and phis.
2866       if (n->outcnt() > 1 &&
2867           !n->is_Proj() &&
2868           nop != Op_CreateEx &&
2869           nop != Op_CheckCastPP &&
2870           nop != Op_DecodeN &&
2871           nop != Op_DecodeNKlass &&
2872           !n->is_Mem() &&
2873           !n->is_Phi()) {
2874         Node *x = n->clone();
2875         call->set_req(TypeFunc::Parms, x);


3400       // register allocation can be confused.
3401       ResourceMark rm;
3402       Unique_Node_List wq;
       // Detach the MemBar's Precedent edge (which could otherwise confuse
       // register allocation, per the comment above) and then walk the
       // detached chain, disconnecting every node this leaves with no
       // remaining uses so no dead nodes linger into matching.
3403       wq.push(n->in(MemBarNode::Precedent));
3404       n->set_req(MemBarNode::Precedent, top());
3405       while (wq.size() > 0) {
3406         Node* m = wq.pop();
3407         if (m->outcnt() == 0) {
             // Node is now dead: enqueue its inputs (they may become dead
             // too once this node lets go of them), then disconnect it.
3408           for (uint j = 0; j < m->req(); j++) {
3409             Node* in = m->in(j);
3410             if (in != NULL) {
3411               wq.push(in);
3412             }
3413           }
3414           m->disconnect_inputs(NULL, this);
3415         }
3416       }
3417     }
3418     break;
3419   }
3420 #if INCLUDE_SHENANDOAHGC
3421   case Op_ShenandoahCompareAndSwapP:
3422   case Op_ShenandoahCompareAndSwapN:
3423   case Op_ShenandoahWeakCompareAndSwapN:
3424   case Op_ShenandoahWeakCompareAndSwapP:
3425   case Op_ShenandoahCompareAndExchangeP:
3426   case Op_ShenandoahCompareAndExchangeN:
3427 #ifdef ASSERT
3428     if( VerifyOptoOopOffsets ) {
3429       MemNode* mem  = n->as_Mem();
3430       // Check to see if address types have grounded out somehow.
3431       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
       // Fix: isa_instptr() returns NULL when the address type is not an
       // instance pointer.  The previous code computed tp->klass() before
       // the "!tp ||" guard in the assert, dereferencing NULL in exactly
       // the case the guard was meant to allow.  Check tp first.
       if (tp != NULL) {
         ciInstanceKlass *k = tp->klass()->as_instance_klass();
         assert(k->contains_field_offset(tp->offset()), "oop offset is sane");
       }
3435     }
3436 #endif
3437      break;
3438   case Op_ShenandoahLoadReferenceBarrier:
       // Load-reference barriers must all have been expanded by the
       // dedicated expansion pass before final graph reshaping.
3439     assert(false, "should have been expanded already");
3440     break;
3441 #endif
3442   case Op_RangeCheck: {
       // No further range-check-specific transformations apply at this
       // point: degrade the RangeCheckNode to a plain IfNode with the same
       // control, test, probability and count, and record the new test in
       // frc._tests for later processing.
3443     RangeCheckNode* rc = n->as_RangeCheck();
3444     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3445     n->subsume_by(iff, this);
3446     frc._tests.push(iff);
3447     break;
3448   }
3449   case Op_ConvI2L: {
3450     if (!Matcher::convi2l_type_required) {
3451       // Code generation on some platforms doesn't need accurate
3452       // ConvI2L types. Widening the type can help remove redundant
3453       // address computations.
3454       n->as_Type()->set_type(TypeLong::INT);
       // Worklist walk of affected nodes; Node_List is used as a growable
       // queue indexed by 'next'.
3455       ResourceMark rm;
3456       Node_List wq;
3457       wq.push(n);
3458       for (uint next = 0; next < wq.size(); next++) {
3459         Node *m = wq.at(next);
3460 
3461         for(;;) {


3859           if (use->is_Con())        continue;  // a dead ConNode is OK
3860           // At this point, we have found a dead node which is DU-reachable.
3861           if (!dead_nodes) {
             // Print the header only once, before the first offender.
3862             tty->print_cr("*** Dead nodes reachable via DU edges:");
3863             dead_nodes = true;
3864           }
           // Dump the offending node (and two levels of context) so the
           // failure is diagnosable from the log.
3865           use->dump(2);
3866           tty->print_cr("---");
3867           checked.push(use);  // No repeats; pretend it is now checked.
3868         }
3869       }
3870       assert(!dead_nodes, "using nodes must be reachable from root");
3871     }
3872   }
3873 }
3874 
3875 // Verify GC barriers consistency
3876 // Currently supported:
3877 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
// - Shenandoah SATB pre-barriers (same shape, different thread-local offset)
3878 void Compile::verify_barriers() {
3879 #if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
3880   if (UseG1GC || UseShenandoahGC) {
3881     // Verify G1 pre-barriers
3882 
       // Both collectors key the SATB pre-barrier on a per-thread
       // "mark queue active" flag; select the offset for whichever
       // collector(s) this build includes (both may be compiled in, in
       // which case the choice is made at runtime via UseG1GC).
3883 #if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
3884     const int marking_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
3885                                                 : ShenandoahThreadLocalData::satb_mark_queue_active_offset());
3886 #elif INCLUDE_G1GC
3887     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
3888 #else
3889     const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
3890 #endif
3891 
3892     ResourceArea *area = Thread::current()->resource_area();
3893     Unique_Node_List visited(area);
3894     Node_List worklist(area);
3895     // We're going to walk control flow backwards starting from the Root
3896     worklist.push(_root);
3897     while (worklist.size() > 0) {
3898       Node* x = worklist.pop();
3899       if (x == NULL || x == top()) continue;
       // Visit each node at most once.
3900       if (visited.member(x)) {
3901         continue;
3902       } else {
3903         visited.push(x);
3904       }
3905 
3906       if (x->is_Region()) {
         // A Region merges several control paths; walk all of them
         // (input 0 is the region's self-reference by convention).
3907         for (uint i = 1; i < x->req(); i++) {
3908           worklist.push(x->in(i));
3909         }
3910       } else {


4626 
4627 /**
4628  * Remove the speculative part of types and clean up the graph
4629  */
4630 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
4631   if (UseTypeSpeculation) {
4632     Unique_Node_List worklist;
4633     worklist.push(root());
4634     int modified = 0;
4635     // Go over all type nodes that carry a speculative type, drop the
4636     // speculative part of the type and enqueue the node for an igvn
4637     // which may optimize it out.
4638     for (uint next = 0; next < worklist.size(); ++next) {
4639       Node *n  = worklist.at(next);
4640       if (n->is_Type()) {
4641         TypeNode* tn = n->as_Type();
4642         const Type* t = tn->type();
4643         const Type* t_no_spec = t->remove_speculative();
4644         if (t_no_spec != t) {
           // Changing the type changes the node's hash, so remove it from
           // the igvn hash table before mutating and re-insert afterwards.
4645           bool in_hash = igvn.hash_delete(n);
           // Shenandoah keeps some nodes out of the hash table (NO_HASH);
           // those legitimately fail hash_delete().
4646           assert(in_hash || (UseShenandoahGC && n->hash() == Node::NO_HASH), "node should be in igvn hash table");
4647           tn->set_type(t_no_spec);
4648           igvn.hash_insert(n);
4649           igvn._worklist.push(n); // give it a chance to go away
4650           modified++;
4651         }
4652       }
       // Continue the walk through all inputs; len() (rather than req())
       // also covers precedence edges.
4653       uint max = n->len();
4654       for( uint i = 0; i < max; ++i ) {
4655         Node *m = n->in(i);
4656         if (not_a_node(m))  continue;
4657         worklist.push(m);
4658       }
4659     }
4660     // Drop the speculative part of all types in the igvn's type table
4661     igvn.remove_speculative_types();
4662     if (modified > 0) {
       // Re-run igvn so nodes whose types just changed can be simplified.
4663       igvn.optimize();
4664     }
4665 #ifdef ASSERT
4666     // Verify that after the IGVN is over no speculative type has resurfaced

< prev index next >