src/hotspot/share/opto/compile.cpp

  65 #include "opto/phaseX.hpp"
  66 #include "opto/rootnode.hpp"
  67 #include "opto/runtime.hpp"
  68 #include "opto/stringopts.hpp"
  69 #include "opto/type.hpp"
  70 #include "opto/vectornode.hpp"
  71 #include "runtime/arguments.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/timer.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/macros.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1ThreadLocalData.hpp"
  81 #endif // INCLUDE_G1GC
  82 #if INCLUDE_ZGC
  83 #include "gc/z/c2/zBarrierSetC2.hpp"
  84 #endif



  85 
  86 
  87 // -------------------- Compile::mach_constant_base_node -----------------------
  88 // Constant table base node singleton.
  89 MachConstantBaseNode* Compile::mach_constant_base_node() {
  90   if (_mach_constant_base_node == NULL) {
  91     _mach_constant_base_node = new MachConstantBaseNode();
  92     _mach_constant_base_node->add_req(C->root());
  93   }
  94   return _mach_constant_base_node;
  95 }
  96 
  97 
  98 /// Support for intrinsics.
  99 
 100 // Return the index at which m must be inserted (or already exists).
 101 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
 102 class IntrinsicDescPair {
 103  private:
 104   ciMethod* _m;

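The comment above describes a lower-bound style lookup: find the slot where the (ciMethod address, is_virtual) key already lives or should be inserted. A minimal, self-contained sketch of that search, with hypothetical names standing in for the C2 types:

    // Illustrative sketch only; 'Desc' and the comparison are assumptions,
    // not the HotSpot intrinsic-table implementation.
    #include <cstddef>
    struct Desc { const void* method; bool is_virtual; };
    static bool desc_less(const Desc& a, const Desc& b) {
      if (a.method != b.method) return a.method < b.method; // major key: address
      return a.is_virtual < b.is_virtual;                   // minor key: is_virtual
    }
    // Return the index at which 'key' must be inserted (or already exists).
    size_t insertion_index(const Desc* list, size_t len, const Desc& key) {
      size_t lo = 0, hi = len;
      while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (desc_less(list[mid], key)) lo = mid + 1; else hi = mid;
      }
      return lo; // first slot whose entry is not less than 'key'
    }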

 374       // We're done with a parsing phase. Replaced nodes are not valid
 375       // beyond that point.
 376       n->as_SafePoint()->delete_replaced_nodes();
 377     }
 378     // Use raw traversal of out edges since this code removes out edges
 379     int max = n->outcnt();
 380     for (int j = 0; j < max; ++j) {
 381       Node* child = n->raw_out(j);
 382       if (! useful.member(child)) {
 383         assert(!child->is_top() || child != top(),
 384                "If top is cached in Compile object it is in useful list");
 385         // Only need to remove this out-edge to the useless node
 386         n->raw_del_out(j);
 387         --j;
 388         --max;
 389       }
 390     }
 391     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 392       record_for_igvn(n->unique_out());
 393     }








 394   }
 395   // Remove useless macro and predicate opaq nodes
 396   for (int i = C->macro_count()-1; i >= 0; i--) {
 397     Node* n = C->macro_node(i);
 398     if (!useful.member(n)) {
 399       remove_macro_node(n);
 400     }
 401   }
 402   // Remove useless CastII nodes with range check dependency
 403   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 404     Node* cast = range_check_cast_node(i);
 405     if (!useful.member(cast)) {
 406       remove_range_check_cast(cast);
 407     }
 408   }
 409   // Remove useless expensive nodes
 410   for (int i = C->expensive_count()-1; i >= 0; i--) {
 411     Node* n = C->expensive_node(i);
 412     if (!useful.member(n)) {
 413       remove_expensive_node(n);


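The edge-pruning loop above removes out-edges while walking them by index, compensating with --j and --max so the slot that just changed is re-examined. A self-contained sketch of that pattern on a plain vector (nothing HotSpot-specific is assumed):

    #include <vector>
    // Delete every element that fails 'useful' while scanning by index.
    void prune(std::vector<int>& edges, bool (*useful)(int)) {
      int max = (int)edges.size();
      for (int j = 0; j < max; ++j) {
        if (!useful(edges[j])) {
          edges.erase(edges.begin() + j); // analogous to n->raw_del_out(j)
          --j;                            // re-check the slot just refilled
          --max;                          // one fewer element left to visit
        }
      }
    }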
 628 // ============================================================================
 629 //------------------------------Compile standard-------------------------------
 630 debug_only( int Compile::_debug_idx = 100000; )
 631 
 632 // Compile a method.  entry_bci is -1 for normal compilations and indicates
 633 // the continuation bci for on stack replacement.
 634 
 635 
 636 Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
 637                   bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, DirectiveSet* directive)
 638                 : Phase(Compiler),
 639                   _env(ci_env),
 640                   _directive(directive),
 641                   _log(ci_env->log()),
 642                   _compile_id(ci_env->compile_id()),
 643                   _save_argument_registers(false),
 644                   _stub_name(NULL),
 645                   _stub_function(NULL),
 646                   _stub_entry_point(NULL),
 647                   _method(target),
 648                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 649                   _entry_bci(osr_bci),
 650                   _initial_gvn(NULL),
 651                   _for_igvn(NULL),
 652                   _warm_calls(NULL),
 653                   _subsume_loads(subsume_loads),
 654                   _do_escape_analysis(do_escape_analysis),
 655                   _eliminate_boxing(eliminate_boxing),
 656                   _failure_reason(NULL),
 657                   _code_buffer("Compile::Fill_buffer"),
 658                   _orig_pc_slot(0),
 659                   _orig_pc_slot_offset_in_bytes(0),
 660                   _has_method_handle_invokes(false),
 661                   _mach_constant_base_node(NULL),
 662                   _node_bundling_limit(0),
 663                   _node_bundling_base(NULL),
 664                   _java_calls(0),
 665                   _inner_loops(0),
 666                   _scratch_const_size(-1),
 667                   _in_scratch_emit_size(false),
 668                   _dead_node_list(comp_arena()),
 669                   _dead_node_count(0),
 670 #ifndef PRODUCT
 671                   _trace_opto_output(directive->TraceOptoOutputOption),
 672                   _in_dump_cnt(0),
 673                   _printer(IdealGraphPrinter::printer()),
 674 #endif
 675                   _congraph(NULL),
 676                   _comp_arena(mtCompiler),
 677                   _node_arena(mtCompiler),
 678                   _old_arena(mtCompiler),

 679                   _Compile_types(mtCompiler),
 680                   _replay_inline_data(NULL),
 681                   _late_inlines(comp_arena(), 2, 0, NULL),
 682                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 683                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 684                   _late_inlines_pos(0),
 685                   _number_of_mh_late_inlines(0),
 686                   _inlining_progress(false),
 687                   _inlining_incrementally(false),
 688                   _print_inlining_list(NULL),
 689                   _print_inlining_stream(NULL),
 690                   _print_inlining_idx(0),
 691                   _print_inlining_output(NULL),
 692                   _interpreter_frame_size(0),
 693                   _max_node_limit(MaxNodeLimit),
 694                   _has_reserved_stack_access(target->has_reserved_stack_access()) {
 695   C = this;
 696 #ifndef PRODUCT
 697   if (_printer != NULL) {
 698     _printer->set_compile(this);


1439     }
1440   } else if( ta && _AliasLevel >= 2 ) {
1441     // For arrays indexed by constant indices, we flatten the alias
1442     // space to include all of the array body.  Only the header, klass
1443     // and array length can be accessed un-aliased.
1444     if( offset != Type::OffsetBot ) {
1445       if( ta->const_oop() ) { // MethodData* or Method*
1446         offset = Type::OffsetBot;   // Flatten constant access into array body
1447         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
1448       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1449         // range is OK as-is.
1450         tj = ta = TypeAryPtr::RANGE;
1451       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1452         tj = TypeInstPtr::KLASS; // all klass loads look alike
1453         ta = TypeAryPtr::RANGE; // generic ignored junk
1454         ptr = TypePtr::BotPTR;
1455       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1456         tj = TypeInstPtr::MARK;
1457         ta = TypeAryPtr::RANGE; // generic ignored junk
1458         ptr = TypePtr::BotPTR;





1459       } else {                  // Random constant offset into array body
1460         offset = Type::OffsetBot;   // Flatten constant access into array body
1461         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1462       }
1463     }
1464     // Arrays of fixed size alias with arrays of unknown size.
1465     if (ta->size() != TypeInt::POS) {
1466       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1467       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
1468     }
1469     // Arrays of known objects become arrays of unknown objects.
1470     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1471       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1472       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1473     }
1474     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1475       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1476       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1477     }
1478     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so


1503         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1504       }
1505     } else if( is_known_inst ) {
1506       tj = to; // Keep NotNull and klass_is_exact for instance type
1507     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1508       // During the 2nd round of IterGVN, NotNull castings are removed.
1509       // Make sure the Bottom and NotNull variants alias the same.
1510       // Also, make sure exact and non-exact variants alias the same.
1511       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1512     }
1513     if (to->speculative() != NULL) {
1514       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
1515     }
1516     // Canonicalize the holder of this field
1517     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1518       // First handle header references such as a LoadKlassNode, even if the
1519       // object's klass is unloaded at compile time (4965979).
1520       if (!is_known_inst) { // Do it only for non-instance types
1521         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1522       }
1523     } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
1524       // Static fields are in the space above the normal instance
1525       // fields in the java.lang.Class instance.
1526       if (to->klass() != ciEnv::current()->Class_klass()) {
1527         to = NULL;
1528         tj = TypeOopPtr::BOTTOM;
1529         offset = tj->offset();
1530       }
1531     } else {
1532       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1533       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1534         if( is_known_inst ) {
1535           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1536         } else {
1537           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1538         }
1539       }
1540     }
1541   }
1542 
1543   // Klass pointers to object array klasses need some flattening


1601     case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
1602     default: ShouldNotReachHere();
1603     }
1604     break;
1605   case 2:                       // No collapsing at level 2; keep all splits
1606   case 3:                       // No collapsing at level 3; keep all splits
1607     break;
1608   default:
1609     Unimplemented();
1610   }
1611 
1612   offset = tj->offset();
1613   assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
1614 
1615   assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
1616           (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
1617           (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
1618           (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
1619           (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1620           (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1621           (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  ,

1622           "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
1623   assert( tj->ptr() != TypePtr::TopPTR &&
1624           tj->ptr() != TypePtr::AnyNull &&
1625           tj->ptr() != TypePtr::Null, "No imprecise addresses" );
1626 //    assert( tj->ptr() != TypePtr::Constant ||
1627 //            tj->base() == Type::RawPtr ||
1628 //            tj->base() == Type::KlassPtr, "No constant oop addresses" );
1629 
1630   return tj;
1631 }
1632 
1633 void Compile::AliasType::Init(int i, const TypePtr* at) {
1634   _index = i;
1635   _adr_type = at;
1636   _field = NULL;
1637   _element = NULL;
1638   _is_rewritable = true; // default
1639   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
1640   if (atoop != NULL && atoop->is_known_instance()) {
1641     const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);


2087 }
2088 
2089 // Perform incremental inlining until bound on number of live nodes is reached
2090 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
2091   TracePhase tp("incrementalInline", &timers[_t_incrInline]);
2092 
2093   PhaseGVN* gvn = initial_gvn();
2094 
2095   set_inlining_incrementally(true);
2096   set_inlining_progress(true);
2097   uint low_live_nodes = 0;
2098 
2099   while(inlining_progress() && _late_inlines.length() > 0) {
2100 
2101     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2102       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
2103         TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
2104         // PhaseIdealLoop is expensive so we only try it once we are
2105         // out of live nodes and we only try it again if the previous
 2106         // attempt helped get the number of nodes down significantly
2107         PhaseIdealLoop ideal_loop( igvn, false, true );
2108         if (failing())  return;
2109         low_live_nodes = live_nodes();
2110         _major_progress = true;
2111       }
2112 
2113       if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2114         break;
2115       }
2116     }
2117 
2118     inline_incrementally_one(igvn);
2119 
2120     if (failing())  return;
2121 
2122     {
2123       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2124       igvn.optimize();
2125     }
2126 
2127     if (failing())  return;


2138 
2139     if (failing())  return;
2140 
2141     {
2142       TracePhase tp("incrementalInline_pru", &timers[_t_incrInline_pru]);
2143       ResourceMark rm;
2144       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2145     }
2146 
2147     {
2148       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2149       igvn = PhaseIterGVN(gvn);
2150       igvn.optimize();
2151     }
2152   }
2153 
2154   set_inlining_incrementally(false);
2155 }
2156 
2157 
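As a rough sketch of the control pattern in inline_incrementally() above: keep inlining while there is work and progress, and once the live-node count exceeds the cutoff, run one expensive cleanup pass only if the previous cleanup had pulled the count well below the limit; otherwise stop. The names, the function-pointer plumbing, and the 8/10 threshold are illustrative assumptions, not the C2 API.

    // Hypothetical driver; 'cleanup' stands in for PhaseIdealLoop and
    // 'inline_one' for inline_incrementally_one().
    void inline_with_budget(unsigned cutoff,
                            unsigned (*live_nodes)(),
                            bool     (*has_work)(),
                            void     (*inline_one)(),
                            void     (*cleanup)()) {
      unsigned low_water = 0;                // live count after the last cleanup
      while (has_work()) {
        if (live_nodes() > cutoff) {
          if (low_water < cutoff * 8 / 10) {
            cleanup();                       // expensive: only retry if it last helped
            low_water = live_nodes();
          }
          if (live_nodes() > cutoff) break;  // still over budget: give up inlining
        }
        inline_one();
      }
    }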















2158 //------------------------------Optimize---------------------------------------
2159 // Given a graph, optimize it.
2160 void Compile::Optimize() {
2161   TracePhase tp("optimizer", &timers[_t_optimizer]);
2162 
2163 #ifndef PRODUCT
2164   if (_directive->BreakAtCompileOption) {
2165     BREAKPOINT;
2166   }
2167 
2168 #endif
2169 
2170 #ifdef ASSERT
2171   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2172   bs->verify_gc_barriers(true);
2173 #endif
2174 
2175   ResourceMark rm;
2176   int          loop_opts_cnt;
2177 
2178   print_inlining_reinit();
2179 
2180   NOT_PRODUCT( verify_graph_edges(); )
2181 
2182   print_method(PHASE_AFTER_PARSING);
2183 
2184  {
2185   // Iterative Global Value Numbering, including ideal transforms
2186   // Initialize IterGVN with types and values from parse-time GVN
2187   PhaseIterGVN igvn(initial_gvn());
2188 #ifdef ASSERT
2189   _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2190 #endif
2191   {
2192     TracePhase tp("iterGVN", &timers[_t_iterGVN]);
2193     igvn.optimize();
2194   }
2195 
2196   print_method(PHASE_ITER_GVN1, 2);
2197 
2198   if (failing())  return;
2199 


2200   inline_incrementally(igvn);
2201 
2202   print_method(PHASE_INCREMENTAL_INLINE, 2);
2203 
2204   if (failing())  return;
2205 
2206   if (eliminate_boxing()) {
2207     // Inline valueOf() methods now.
2208     inline_boxing_calls(igvn);
2209 
2210     if (AlwaysIncrementalInline) {
2211       inline_incrementally(igvn);
2212     }
2213 
2214     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2215 
2216     if (failing())  return;
2217   }
2218 
2219   // Remove the speculative part of types and clean up the graph from


2228 
2229   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2230     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2231     initial_gvn()->replace_with(&igvn);
2232     for_igvn()->clear();
2233     Unique_Node_List new_worklist(C->comp_arena());
2234     {
2235       ResourceMark rm;
2236       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2237     }
2238     set_for_igvn(&new_worklist);
2239     igvn = PhaseIterGVN(initial_gvn());
2240     igvn.optimize();
2241   }
2242 
2243   // Perform escape analysis
2244   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2245     if (has_loops()) {
2246       // Cleanup graph (remove dead nodes).
2247       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2248       PhaseIdealLoop ideal_loop( igvn, false, true );
2249       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2250       if (failing())  return;
2251     }
2252     ConnectionGraph::do_analysis(this, &igvn);
2253 
2254     if (failing())  return;
2255 
2256     // Optimize out fields loads from scalar replaceable allocations.
2257     igvn.optimize();
2258     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2259 
2260     if (failing())  return;
2261 
2262     if (congraph() != NULL && macro_count() > 0) {
2263       TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2264       PhaseMacroExpand mexp(igvn);
2265       mexp.eliminate_macro_nodes();
2266       igvn.set_delay_transform(false);
2267 
2268       igvn.optimize();
2269       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2270 
2271       if (failing())  return;
2272     }
2273   }
2274 
2275   // Loop transforms on the ideal graph.  Range Check Elimination,
2276   // peeling, unrolling, etc.
2277 
2278   // Set loop opts counter
2279   loop_opts_cnt = num_loop_opts();
2280   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2281     {
2282       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2283       PhaseIdealLoop ideal_loop( igvn, true );
2284       loop_opts_cnt--;
2285       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2286       if (failing())  return;
2287     }
2288     // Loop opts pass if partial peeling occurred in previous pass
2289     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
2290       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2291       PhaseIdealLoop ideal_loop( igvn, false );
2292       loop_opts_cnt--;
2293       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2294       if (failing())  return;
2295     }
2296     // Loop opts pass for loop-unrolling before CCP
2297     if(major_progress() && (loop_opts_cnt > 0)) {
2298       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2299       PhaseIdealLoop ideal_loop( igvn, false );
2300       loop_opts_cnt--;
2301       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2302     }
2303     if (!failing()) {
2304       // Verify that last round of loop opts produced a valid graph
2305       TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2306       PhaseIdealLoop::verify(igvn);
2307     }
2308   }
2309   if (failing())  return;
2310 
2311   // Conditional Constant Propagation;
2312   PhaseCCP ccp( &igvn );
2313   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2314   {
2315     TracePhase tp("ccp", &timers[_t_ccp]);
2316     ccp.do_transform();
2317   }
2318   print_method(PHASE_CPP1, 2);
2319 
2320   assert( true, "Break here to ccp.dump_old2new_map()");
2321 
2322   // Iterative Global Value Numbering, including ideal transforms
2323   {
2324     TracePhase tp("iterGVN2", &timers[_t_iterGVN2]);
2325     igvn = ccp;
2326     igvn.optimize();
2327   }
2328 
2329   print_method(PHASE_ITER_GVN2, 2);
2330 
2331   if (failing())  return;
2332 
2333   // Loop transforms on the ideal graph.  Range Check Elimination,
2334   // peeling, unrolling, etc.
2335   if(loop_opts_cnt > 0) {
2336     debug_only( int cnt = 0; );
2337     while(major_progress() && (loop_opts_cnt > 0)) {
2338       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2339       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2340       PhaseIdealLoop ideal_loop( igvn, true);
2341       loop_opts_cnt--;
2342       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2343       if (failing())  return;
2344     }
2345   }
2346 
2347 #if INCLUDE_ZGC
2348   if (UseZGC) {
2349     ZBarrierSetC2::find_dominating_barriers(igvn);
2350   }
2351 #endif
2352 
2353   if (failing())  return;
2354 
2355   // Ensure that major progress is now clear
2356   C->clear_major_progress();
2357 
2358   {
2359     // Verify that all previous optimizations produced a valid graph
2360     // at least to this point, even if no loop optimizations were done.
2361     TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2362     PhaseIdealLoop::verify(igvn);
2363   }
2364 


2366     // No more loop optimizations. Remove all range check dependent CastIINodes.
2367     C->remove_range_check_casts(igvn);
2368     igvn.optimize();
2369   }
2370 
2371 #ifdef ASSERT
2372   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2373   bs->verify_gc_barriers(false);
2374 #endif
2375 
2376   {
2377     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2378     PhaseMacroExpand  mex(igvn);
2379     print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
2380     if (mex.expand_macro_nodes()) {
2381       assert(failing(), "must bail out w/ explicit message");
2382       return;
2383     }
2384   }
2385 









2386   if (opaque4_count() > 0) {
2387     C->remove_opaque4_nodes(igvn);
2388     igvn.optimize();
2389   }
2390 
2391   DEBUG_ONLY( _modified_nodes = NULL; )
2392  } // (End scope of igvn; run destructor if necessary for asserts.)
2393 
2394  process_print_inlining();
2395  // A method with only infinite loops has no edges entering loops from root
2396  {
2397    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2398    if (final_graph_reshaping()) {
2399      assert(failing(), "must bail out w/ explicit message");
2400      return;
2401    }
2402  }
2403 
2404  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2405 }


2795   // case Op_ConvD2L: // handled by leaf call
2796   case Op_ConD:
2797   case Op_CmpD:
2798   case Op_CmpD3:
2799     frc.inc_double_count();
2800     break;
2801   case Op_Opaque1:              // Remove Opaque Nodes before matching
2802   case Op_Opaque2:              // Remove Opaque Nodes before matching
2803   case Op_Opaque3:
2804     n->subsume_by(n->in(1), this);
2805     break;
2806   case Op_CallStaticJava:
2807   case Op_CallJava:
2808   case Op_CallDynamicJava:
  2809     frc.inc_java_call_count(); // Count the java call site
2810   case Op_CallRuntime:
2811   case Op_CallLeaf:
2812   case Op_CallLeafNoFP: {
2813     assert (n->is_Call(), "");
2814     CallNode *call = n->as_Call();











2815     // Count call sites where the FP mode bit would have to be flipped.
2816     // Do not count uncommon runtime calls:
2817     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2818     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2819     if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
2820       frc.inc_call_count();   // Count the call site
2821     } else {                  // See if uncommon argument is shared
2822       Node *n = call->in(TypeFunc::Parms);
2823       int nop = n->Opcode();
2824       // Clone shared simple arguments to uncommon calls, item (1).
2825       if (n->outcnt() > 1 &&
2826           !n->is_Proj() &&
2827           nop != Op_CreateEx &&
2828           nop != Op_CheckCastPP &&
2829           nop != Op_DecodeN &&
2830           nop != Op_DecodeNKlass &&
2831           !n->is_Mem() &&
2832           !n->is_Phi()) {
2833         Node *x = n->clone();
2834         call->set_req(TypeFunc::Parms, x);


2997 #endif
2998     // platform dependent reshaping of the address expression
2999     reshape_address(n->as_AddP());
3000     break;
3001   }
3002 
3003   case Op_CastPP: {
3004     // Remove CastPP nodes to gain more freedom during scheduling but
3005     // keep the dependency they encode as control or precedence edges
3006     // (if control is set already) on memory operations. Some CastPP
3007     // nodes don't have a control (don't carry a dependency): skip
3008     // those.
3009     if (n->in(0) != NULL) {
3010       ResourceMark rm;
3011       Unique_Node_List wq;
3012       wq.push(n);
3013       for (uint next = 0; next < wq.size(); ++next) {
3014         Node *m = wq.at(next);
3015         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
3016           Node* use = m->fast_out(i);
3017           if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
3018             use->ensure_control_or_add_prec(n->in(0));
3019           } else {
3020             switch(use->Opcode()) {
3021             case Op_AddP:
3022             case Op_DecodeN:
3023             case Op_DecodeNKlass:
3024             case Op_CheckCastPP:
3025             case Op_CastPP:
3026               wq.push(use);
3027               break;
3028             }
3029           }
3030         }
3031       }
3032     }
3033     const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false);
3034     if (is_LP64 && n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
3035       Node* in1 = n->in(1);
3036       const Type* t = n->bottom_type();
3037       Node* new_in1 = in1->clone();


3333         }
3334       } else {
3335         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
3336           Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
3337           n->set_req(2, shift);
3338         }
3339       }
3340       if (in2->outcnt() == 0) { // Remove dead node
3341         in2->disconnect_inputs(NULL, this);
3342       }
3343     }
3344     break;
3345   case Op_MemBarStoreStore:
3346   case Op_MemBarRelease:
3347     // Break the link with AllocateNode: it is no longer useful and
3348     // confuses register allocation.
3349     if (n->req() > MemBarNode::Precedent) {
3350       n->set_req(MemBarNode::Precedent, top());
3351     }
3352     break;


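The shift-count handling a little above inserts an AndI with the word-size mask whenever the count's type cannot be proven to lie in range. The observable effect is ordinary power-of-two masking of a shift amount, sketched here with hypothetical names:

    #include <cstdint>
    // Constrain a shift count to the legal range of a 32-bit shift,
    // mirroring the inserted 'AndI(count, mask)' node.
    uint32_t shl32(uint32_t value, uint32_t count) {
      const uint32_t mask = 31;         // BitsPerInt - 1
      return value << (count & mask);   // masked count is always in [0, 31]
    }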





3353   case Op_RangeCheck: {
3354     RangeCheckNode* rc = n->as_RangeCheck();
3355     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3356     n->subsume_by(iff, this);
3357     frc._tests.push(iff);
3358     break;
3359   }
3360   case Op_ConvI2L: {
3361     if (!Matcher::convi2l_type_required) {
3362       // Code generation on some platforms doesn't need accurate
3363       // ConvI2L types. Widening the type can help remove redundant
3364       // address computations.
3365       n->as_Type()->set_type(TypeLong::INT);
3366       ResourceMark rm;
3367       Node_List wq;
3368       wq.push(n);
3369       for (uint next = 0; next < wq.size(); next++) {
3370         Node *m = wq.at(next);
3371 
3372         for(;;) {


3770           if (use->is_Con())        continue;  // a dead ConNode is OK
3771           // At this point, we have found a dead node which is DU-reachable.
3772           if (!dead_nodes) {
3773             tty->print_cr("*** Dead nodes reachable via DU edges:");
3774             dead_nodes = true;
3775           }
3776           use->dump(2);
3777           tty->print_cr("---");
3778           checked.push(use);  // No repeats; pretend it is now checked.
3779         }
3780       }
3781       assert(!dead_nodes, "using nodes must be reachable from root");
3782     }
3783   }
3784 }
3785 
3786 // Verify GC barriers consistency
3787 // Currently supported:
3788 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
3789 void Compile::verify_barriers() {
3790 #if INCLUDE_G1GC
3791   if (UseG1GC) {
3792     // Verify G1 pre-barriers





3793     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());



3794 
3795     ResourceArea *area = Thread::current()->resource_area();
3796     Unique_Node_List visited(area);
3797     Node_List worklist(area);
3798     // We're going to walk control flow backwards starting from the Root
3799     worklist.push(_root);
3800     while (worklist.size() > 0) {
3801       Node* x = worklist.pop();
3802       if (x == NULL || x == top()) continue;
3803       if (visited.member(x)) {
3804         continue;
3805       } else {
3806         visited.push(x);
3807       }
3808 
3809       if (x->is_Region()) {
3810         for (uint i = 1; i < x->req(); i++) {
3811           worklist.push(x->in(i));
3812         }
3813       } else {


4529 
4530 /**
4531  * Remove the speculative part of types and clean up the graph
4532  */
4533 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
4534   if (UseTypeSpeculation) {
4535     Unique_Node_List worklist;
4536     worklist.push(root());
4537     int modified = 0;
4538     // Go over all type nodes that carry a speculative type, drop the
4539     // speculative part of the type and enqueue the node for an igvn
4540     // which may optimize it out.
4541     for (uint next = 0; next < worklist.size(); ++next) {
4542       Node *n  = worklist.at(next);
4543       if (n->is_Type()) {
4544         TypeNode* tn = n->as_Type();
4545         const Type* t = tn->type();
4546         const Type* t_no_spec = t->remove_speculative();
4547         if (t_no_spec != t) {
4548           bool in_hash = igvn.hash_delete(n);
4549           assert(in_hash, "node should be in igvn hash table");
4550           tn->set_type(t_no_spec);
4551           igvn.hash_insert(n);
4552           igvn._worklist.push(n); // give it a chance to go away
4553           modified++;
4554         }
4555       }
4556       uint max = n->len();
4557       for( uint i = 0; i < max; ++i ) {
4558         Node *m = n->in(i);
4559         if (not_a_node(m))  continue;
4560         worklist.push(m);
4561       }
4562     }
4563     // Drop the speculative part of all types in the igvn's type table
4564     igvn.remove_speculative_types();
4565     if (modified > 0) {
4566       igvn.optimize();
4567     }
4568 #ifdef ASSERT
4569     // Verify that after the IGVN is over no speculative type has resurfaced

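remove_speculative_types() above changes a node's type while the node may be keyed by that type in the value-numbering hash table, so it deletes the node from the table, mutates it, re-inserts it, and re-queues it for IGVN. A generic sketch of the remove-mutate-reinsert discipline on a value-keyed std::unordered_set (names are assumptions):

    #include <string>
    #include <unordered_set>
    // Mutating an element in place would leave it in the wrong bucket, so take
    // it out, change it, and put it back (mirrors hash_delete()/hash_insert()).
    void rekey(std::unordered_set<std::string>& table,
               const std::string& old_key, const std::string& new_key) {
      auto it = table.find(old_key);
      if (it == table.end()) return;  // not in the table: nothing to re-hash
      table.erase(it);                // remove before the key changes
      table.insert(new_key);          // re-insert under the new hash
    }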



  65 #include "opto/phaseX.hpp"
  66 #include "opto/rootnode.hpp"
  67 #include "opto/runtime.hpp"
  68 #include "opto/stringopts.hpp"
  69 #include "opto/type.hpp"
  70 #include "opto/vectornode.hpp"
  71 #include "runtime/arguments.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/timer.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/macros.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1ThreadLocalData.hpp"
  81 #endif // INCLUDE_G1GC
  82 #if INCLUDE_ZGC
  83 #include "gc/z/c2/zBarrierSetC2.hpp"
  84 #endif
  85 #if INCLUDE_SHENANDOAHGC
  86 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  87 #endif
  88 
  89 
  90 // -------------------- Compile::mach_constant_base_node -----------------------
  91 // Constant table base node singleton.
  92 MachConstantBaseNode* Compile::mach_constant_base_node() {
  93   if (_mach_constant_base_node == NULL) {
  94     _mach_constant_base_node = new MachConstantBaseNode();
  95     _mach_constant_base_node->add_req(C->root());
  96   }
  97   return _mach_constant_base_node;
  98 }
  99 
 100 
 101 /// Support for intrinsics.
 102 
 103 // Return the index at which m must be inserted (or already exists).
 104 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
 105 class IntrinsicDescPair {
 106  private:
 107   ciMethod* _m;


 377       // We're done with a parsing phase. Replaced nodes are not valid
 378       // beyond that point.
 379       n->as_SafePoint()->delete_replaced_nodes();
 380     }
 381     // Use raw traversal of out edges since this code removes out edges
 382     int max = n->outcnt();
 383     for (int j = 0; j < max; ++j) {
 384       Node* child = n->raw_out(j);
 385       if (! useful.member(child)) {
 386         assert(!child->is_top() || child != top(),
 387                "If top is cached in Compile object it is in useful list");
 388         // Only need to remove this out-edge to the useless node
 389         n->raw_del_out(j);
 390         --j;
 391         --max;
 392       }
 393     }
 394     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 395       record_for_igvn(n->unique_out());
 396     }
 397 #if INCLUDE_SHENANDOAHGC
  398     // TODO: Move into eliminate_useless_gc_barriers(..) below
 399     if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
 400       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 401         record_for_igvn(n->fast_out(i));
 402       }
 403     }
 404 #endif
 405   }
 406   // Remove useless macro and predicate opaq nodes
 407   for (int i = C->macro_count()-1; i >= 0; i--) {
 408     Node* n = C->macro_node(i);
 409     if (!useful.member(n)) {
 410       remove_macro_node(n);
 411     }
 412   }
 413   // Remove useless CastII nodes with range check dependency
 414   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 415     Node* cast = range_check_cast_node(i);
 416     if (!useful.member(cast)) {
 417       remove_range_check_cast(cast);
 418     }
 419   }
 420   // Remove useless expensive nodes
 421   for (int i = C->expensive_count()-1; i >= 0; i--) {
 422     Node* n = C->expensive_node(i);
 423     if (!useful.member(n)) {
 424       remove_expensive_node(n);


 639 // ============================================================================
 640 //------------------------------Compile standard-------------------------------
 641 debug_only( int Compile::_debug_idx = 100000; )
 642 
 643 // Compile a method.  entry_bci is -1 for normal compilations and indicates
 644 // the continuation bci for on stack replacement.
 645 
 646 
 647 Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
 648                   bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, DirectiveSet* directive)
 649                 : Phase(Compiler),
 650                   _env(ci_env),
 651                   _directive(directive),
 652                   _log(ci_env->log()),
 653                   _compile_id(ci_env->compile_id()),
 654                   _save_argument_registers(false),
 655                   _stub_name(NULL),
 656                   _stub_function(NULL),
 657                   _stub_entry_point(NULL),
 658                   _method(target),

 659                   _entry_bci(osr_bci),
 660                   _initial_gvn(NULL),
 661                   _for_igvn(NULL),
 662                   _warm_calls(NULL),
 663                   _subsume_loads(subsume_loads),
 664                   _do_escape_analysis(do_escape_analysis),
 665                   _eliminate_boxing(eliminate_boxing),
 666                   _failure_reason(NULL),
 667                   _code_buffer("Compile::Fill_buffer"),
 668                   _orig_pc_slot(0),
 669                   _orig_pc_slot_offset_in_bytes(0),
 670                   _has_method_handle_invokes(false),
 671                   _mach_constant_base_node(NULL),
 672                   _node_bundling_limit(0),
 673                   _node_bundling_base(NULL),
 674                   _java_calls(0),
 675                   _inner_loops(0),
 676                   _scratch_const_size(-1),
 677                   _in_scratch_emit_size(false),
 678                   _dead_node_list(comp_arena()),
 679                   _dead_node_count(0),
 680 #ifndef PRODUCT
 681                   _trace_opto_output(directive->TraceOptoOutputOption),
 682                   _in_dump_cnt(0),
 683                   _printer(IdealGraphPrinter::printer()),
 684 #endif
 685                   _congraph(NULL),
 686                   _comp_arena(mtCompiler),
 687                   _node_arena(mtCompiler),
 688                   _old_arena(mtCompiler),
 689                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 690                   _Compile_types(mtCompiler),
 691                   _replay_inline_data(NULL),
 692                   _late_inlines(comp_arena(), 2, 0, NULL),
 693                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 694                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 695                   _late_inlines_pos(0),
 696                   _number_of_mh_late_inlines(0),
 697                   _inlining_progress(false),
 698                   _inlining_incrementally(false),
 699                   _print_inlining_list(NULL),
 700                   _print_inlining_stream(NULL),
 701                   _print_inlining_idx(0),
 702                   _print_inlining_output(NULL),
 703                   _interpreter_frame_size(0),
 704                   _max_node_limit(MaxNodeLimit),
 705                   _has_reserved_stack_access(target->has_reserved_stack_access()) {
 706   C = this;
 707 #ifndef PRODUCT
 708   if (_printer != NULL) {
 709     _printer->set_compile(this);


1450     }
1451   } else if( ta && _AliasLevel >= 2 ) {
1452     // For arrays indexed by constant indices, we flatten the alias
1453     // space to include all of the array body.  Only the header, klass
1454     // and array length can be accessed un-aliased.
1455     if( offset != Type::OffsetBot ) {
1456       if( ta->const_oop() ) { // MethodData* or Method*
1457         offset = Type::OffsetBot;   // Flatten constant access into array body
1458         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
1459       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1460         // range is OK as-is.
1461         tj = ta = TypeAryPtr::RANGE;
1462       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1463         tj = TypeInstPtr::KLASS; // all klass loads look alike
1464         ta = TypeAryPtr::RANGE; // generic ignored junk
1465         ptr = TypePtr::BotPTR;
1466       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1467         tj = TypeInstPtr::MARK;
1468         ta = TypeAryPtr::RANGE; // generic ignored junk
1469         ptr = TypePtr::BotPTR;
1470 #if INCLUDE_SHENANDOAHGC
1471       } else if (offset == ShenandoahBrooksPointer::byte_offset() && UseShenandoahGC) {
 1472         // Keep the Brooks pointer offset distinct; do not flatten it into the array body.
1473         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1474 #endif
1475       } else {                  // Random constant offset into array body
1476         offset = Type::OffsetBot;   // Flatten constant access into array body
1477         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1478       }
1479     }
1480     // Arrays of fixed size alias with arrays of unknown size.
1481     if (ta->size() != TypeInt::POS) {
1482       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1483       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
1484     }
1485     // Arrays of known objects become arrays of unknown objects.
1486     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1487       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1488       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1489     }
1490     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1491       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1492       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1493     }
1494     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so


1519         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1520       }
1521     } else if( is_known_inst ) {
1522       tj = to; // Keep NotNull and klass_is_exact for instance type
1523     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1524       // During the 2nd round of IterGVN, NotNull castings are removed.
1525       // Make sure the Bottom and NotNull variants alias the same.
1526       // Also, make sure exact and non-exact variants alias the same.
1527       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1528     }
1529     if (to->speculative() != NULL) {
1530       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
1531     }
1532     // Canonicalize the holder of this field
1533     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1534       // First handle header references such as a LoadKlassNode, even if the
1535       // object's klass is unloaded at compile time (4965979).
1536       if (!is_known_inst) { // Do it only for non-instance types
1537         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1538       }
1539     } else if (SHENANDOAHGC_ONLY((offset != ShenandoahBrooksPointer::byte_offset() || !UseShenandoahGC) &&) (offset < 0 || offset >= k->size_helper() * wordSize)) {
1540       // Static fields are in the space above the normal instance
1541       // fields in the java.lang.Class instance.
1542       if (to->klass() != ciEnv::current()->Class_klass()) {
1543         to = NULL;
1544         tj = TypeOopPtr::BOTTOM;
1545         offset = tj->offset();
1546       }
1547     } else {
1548       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1549       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1550         if( is_known_inst ) {
1551           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1552         } else {
1553           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1554         }
1555       }
1556     }
1557   }
1558 
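As a hedged illustration of the array flattening above (not C2 code): constant offsets into the array body collapse to a single bottom offset so all body accesses share one alias class, while the header slots, and under Shenandoah the Brooks pointer slot, keep their own offsets:

    #include <climits>
    const int kOffsetBot = INT_MIN;   // hypothetical stand-in for Type::OffsetBot
    // Map a byte offset within an array object to its alias-class key.
    int array_alias_offset(int offset, int mark_off, int klass_off,
                           int length_off, int brooks_off /* -1 if unused */) {
      if (offset == mark_off || offset == klass_off ||
          offset == length_off || offset == brooks_off) {
        return offset;                // header (and Brooks) slots stay distinct
      }
      return kOffsetBot;              // every body element shares one class
    }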
1559   // Klass pointers to object array klasses need some flattening


1617     case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
1618     default: ShouldNotReachHere();
1619     }
1620     break;
1621   case 2:                       // No collapsing at level 2; keep all splits
1622   case 3:                       // No collapsing at level 3; keep all splits
1623     break;
1624   default:
1625     Unimplemented();
1626   }
1627 
1628   offset = tj->offset();
1629   assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
1630 
1631   assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
1632           (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
1633           (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
1634           (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
1635           (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1636           (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1637           (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1638           (UseShenandoahGC SHENANDOAHGC_ONLY(&& offset == ShenandoahBrooksPointer::byte_offset() && tj->base() == Type::AryPtr)),
1639           "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
1640   assert( tj->ptr() != TypePtr::TopPTR &&
1641           tj->ptr() != TypePtr::AnyNull &&
1642           tj->ptr() != TypePtr::Null, "No imprecise addresses" );
1643 //    assert( tj->ptr() != TypePtr::Constant ||
1644 //            tj->base() == Type::RawPtr ||
1645 //            tj->base() == Type::KlassPtr, "No constant oop addresses" );
1646 
1647   return tj;
1648 }
1649 
1650 void Compile::AliasType::Init(int i, const TypePtr* at) {
1651   _index = i;
1652   _adr_type = at;
1653   _field = NULL;
1654   _element = NULL;
1655   _is_rewritable = true; // default
1656   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
1657   if (atoop != NULL && atoop->is_known_instance()) {
1658     const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);


2104 }
2105 
2106 // Perform incremental inlining until bound on number of live nodes is reached
2107 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
2108   TracePhase tp("incrementalInline", &timers[_t_incrInline]);
2109 
2110   PhaseGVN* gvn = initial_gvn();
2111 
2112   set_inlining_incrementally(true);
2113   set_inlining_progress(true);
2114   uint low_live_nodes = 0;
2115 
2116   while(inlining_progress() && _late_inlines.length() > 0) {
2117 
2118     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2119       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
2120         TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
2121         // PhaseIdealLoop is expensive so we only try it once we are
2122         // out of live nodes and we only try it again if the previous
 2123         // attempt helped get the number of nodes down significantly
2124         PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
2125         if (failing())  return;
2126         low_live_nodes = live_nodes();
2127         _major_progress = true;
2128       }
2129 
2130       if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2131         break;
2132       }
2133     }
2134 
2135     inline_incrementally_one(igvn);
2136 
2137     if (failing())  return;
2138 
2139     {
2140       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2141       igvn.optimize();
2142     }
2143 
2144     if (failing())  return;


2155 
2156     if (failing())  return;
2157 
2158     {
2159       TracePhase tp("incrementalInline_pru", &timers[_t_incrInline_pru]);
2160       ResourceMark rm;
2161       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2162     }
2163 
2164     {
2165       TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2166       igvn = PhaseIterGVN(gvn);
2167       igvn.optimize();
2168     }
2169   }
2170 
2171   set_inlining_incrementally(false);
2172 }
2173 
2174 
2175 bool Compile::optimize_loops(int& loop_opts_cnt, PhaseIterGVN& igvn, LoopOptsMode mode) {
2176   if(loop_opts_cnt > 0) {
2177     debug_only( int cnt = 0; );
2178     while(major_progress() && (loop_opts_cnt > 0)) {
2179       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2180       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2181       PhaseIdealLoop ideal_loop(igvn, mode);
2182       loop_opts_cnt--;
2183       if (failing())  return false;
2184       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2185     }
2186   }
2187   return true;
2188 }
2189 
2190 //------------------------------Optimize---------------------------------------
2191 // Given a graph, optimize it.
2192 void Compile::Optimize() {
2193   TracePhase tp("optimizer", &timers[_t_optimizer]);
2194 
2195 #ifndef PRODUCT
2196   if (_directive->BreakAtCompileOption) {
2197     BREAKPOINT;
2198   }
2199 
2200 #endif
2201 
2202 #ifdef ASSERT
2203   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2204   bs->verify_gc_barriers(true);
2205 #endif
2206 
2207   ResourceMark rm;
2208   int          loop_opts_cnt;
2209 
2210   print_inlining_reinit();
2211 
2212   NOT_PRODUCT( verify_graph_edges(); )
2213 
2214   print_method(PHASE_AFTER_PARSING);
2215 
2216  {
2217   // Iterative Global Value Numbering, including ideal transforms
2218   // Initialize IterGVN with types and values from parse-time GVN
2219   PhaseIterGVN igvn(initial_gvn());
2220 #ifdef ASSERT
2221   _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2222 #endif
2223   {
2224     TracePhase tp("iterGVN", &timers[_t_iterGVN]);
2225     igvn.optimize();
2226   }
2227 


2228   if (failing())  return;
2229 
2230   print_method(PHASE_ITER_GVN1, 2);
2231 
2232   inline_incrementally(igvn);
2233 
2234   print_method(PHASE_INCREMENTAL_INLINE, 2);
2235 
2236   if (failing())  return;
2237 
2238   if (eliminate_boxing()) {
2239     // Inline valueOf() methods now.
2240     inline_boxing_calls(igvn);
2241 
2242     if (AlwaysIncrementalInline) {
2243       inline_incrementally(igvn);
2244     }
2245 
2246     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2247 
2248     if (failing())  return;
2249   }
2250 
2251   // Remove the speculative part of types and clean up the graph from


2260 
2261   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2262     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2263     initial_gvn()->replace_with(&igvn);
2264     for_igvn()->clear();
2265     Unique_Node_List new_worklist(C->comp_arena());
2266     {
2267       ResourceMark rm;
2268       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2269     }
2270     set_for_igvn(&new_worklist);
2271     igvn = PhaseIterGVN(initial_gvn());
2272     igvn.optimize();
2273   }
2274 
2275   // Perform escape analysis
2276   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2277     if (has_loops()) {
2278       // Cleanup graph (remove dead nodes).
2279       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2280       PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
2281       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2282       if (failing())  return;
2283     }
2284     ConnectionGraph::do_analysis(this, &igvn);
2285 
2286     if (failing())  return;
2287 
2288     // Optimize out fields loads from scalar replaceable allocations.
2289     igvn.optimize();
2290     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2291 
2292     if (failing())  return;
2293 
2294     if (congraph() != NULL && macro_count() > 0) {
2295       TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2296       PhaseMacroExpand mexp(igvn);
2297       mexp.eliminate_macro_nodes();
2298       igvn.set_delay_transform(false);
2299 
2300       igvn.optimize();
2301       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2302 
2303       if (failing())  return;
2304     }
2305   }
2306 
2307   // Loop transforms on the ideal graph.  Range Check Elimination,
2308   // peeling, unrolling, etc.
2309 
2310   // Set loop opts counter
2311   loop_opts_cnt = num_loop_opts();
2312   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2313     {
2314       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2315       PhaseIdealLoop ideal_loop(igvn, LoopOptsDefault);
2316       loop_opts_cnt--;
2317       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2318       if (failing())  return;
2319     }
2320     // Loop opts pass if partial peeling occurred in previous pass
2321     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
2322       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2323       PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
2324       loop_opts_cnt--;
2325       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2326       if (failing())  return;
2327     }
2328     // Loop opts pass for loop-unrolling before CCP
2329     if(major_progress() && (loop_opts_cnt > 0)) {
2330       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2331       PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
2332       loop_opts_cnt--;
2333       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2334     }
2335     if (!failing()) {
2336       // Verify that last round of loop opts produced a valid graph
2337       TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2338       PhaseIdealLoop::verify(igvn);
2339     }
2340   }
2341   if (failing())  return;
2342 
2343   // Conditional Constant Propagation;
2344   PhaseCCP ccp( &igvn );
2345   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2346   {
2347     TracePhase tp("ccp", &timers[_t_ccp]);
2348     ccp.do_transform();
2349   }
2350   print_method(PHASE_CPP1, 2);
2351 
2352   assert( true, "Break here to ccp.dump_old2new_map()");
2353 
2354   // Iterative Global Value Numbering, including ideal transforms
2355   {
2356     TracePhase tp("iterGVN2", &timers[_t_iterGVN2]);
2357     igvn = ccp;
2358     igvn.optimize();
2359   }
2360 
2361   print_method(PHASE_ITER_GVN2, 2);
2362 
2363   if (failing())  return;
2364 
2365   // Loop transforms on the ideal graph.  Range Check Elimination,
2366   // peeling, unrolling, etc.
2367   if (!optimize_loops(loop_opts_cnt, igvn, LoopOptsDefault)) {
2368     return;








2369   }
2370 
2371 #if INCLUDE_ZGC
2372   if (UseZGC) {
2373     ZBarrierSetC2::find_dominating_barriers(igvn);
2374   }
2375 #endif
2376 
2377   if (failing())  return;
2378 
2379   // Ensure that major progress is now clear
2380   C->clear_major_progress();
2381 
2382   {
2383     // Verify that all previous optimizations produced a valid graph
2384     // at least to this point, even if no loop optimizations were done.
2385     TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2386     PhaseIdealLoop::verify(igvn);
2387   }
2388 


2390     // No more loop optimizations. Remove all range check dependent CastIINodes.
2391     C->remove_range_check_casts(igvn);
2392     igvn.optimize();
2393   }
2394 
2395 #ifdef ASSERT
2396   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2397   bs->verify_gc_barriers(false);
2398 #endif
2399 
2400   {
2401     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2402     PhaseMacroExpand  mex(igvn);
2403     print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
2404     if (mex.expand_macro_nodes()) {
2405       assert(failing(), "must bail out w/ explicit message");
2406       return;
2407     }
2408   }
2409 
2410   print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);
2411 
2412 #if INCLUDE_SHENANDOAHGC
2413   if (UseShenandoahGC && !ShenandoahWriteBarrierNode::expand(this, igvn, loop_opts_cnt)) {
2414     assert(failing(), "must bail out w/ explicit message");
2415     return;
2416   }
2417 #endif
2418 
2419   if (opaque4_count() > 0) {
2420     C->remove_opaque4_nodes(igvn);
2421     igvn.optimize();
2422   }
2423 
2424   DEBUG_ONLY( _modified_nodes = NULL; )
2425  } // (End scope of igvn; run destructor if necessary for asserts.)
2426 
2427  process_print_inlining();
2428  // A method with only infinite loops has no edges entering loops from root
2429  {
2430    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2431    if (final_graph_reshaping()) {
2432      assert(failing(), "must bail out w/ explicit message");
2433      return;
2434    }
2435  }
2436 
2437  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2438 }


2828   // case Op_ConvD2L: // handled by leaf call
2829   case Op_ConD:
2830   case Op_CmpD:
2831   case Op_CmpD3:
2832     frc.inc_double_count();
2833     break;
2834   case Op_Opaque1:              // Remove Opaque Nodes before matching
2835   case Op_Opaque2:              // Remove Opaque Nodes before matching
2836   case Op_Opaque3:
2837     n->subsume_by(n->in(1), this);
2838     break;
2839   case Op_CallStaticJava:
2840   case Op_CallJava:
2841   case Op_CallDynamicJava:
  2842     frc.inc_java_call_count(); // Count the java call site
2843   case Op_CallRuntime:
2844   case Op_CallLeaf:
2845   case Op_CallLeafNoFP: {
2846     assert (n->is_Call(), "");
2847     CallNode *call = n->as_Call();
2848 #if INCLUDE_SHENANDOAHGC
2849     if (UseShenandoahGC && ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
2850       uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
2851       if (call->req() > cnt) {
2852         assert(call->req() == cnt+1, "only one extra input");
2853         Node* addp = call->in(cnt);
2854         assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
2855         call->del_req(cnt);
2856       }
2857     }
2858 #endif
2859     // Count call sites where the FP mode bit would have to be flipped.
2860     // Do not count uncommon runtime calls:
2861     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2862     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2863     if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
2864       frc.inc_call_count();   // Count the call site
2865     } else {                  // See if uncommon argument is shared
2866       Node *n = call->in(TypeFunc::Parms);
2867       int nop = n->Opcode();
2868       // Clone shared simple arguments to uncommon calls, item (1).
2869       if (n->outcnt() > 1 &&
2870           !n->is_Proj() &&
2871           nop != Op_CreateEx &&
2872           nop != Op_CheckCastPP &&
2873           nop != Op_DecodeN &&
2874           nop != Op_DecodeNKlass &&
2875           !n->is_Mem() &&
2876           !n->is_Phi()) {
2877         Node *x = n->clone();
2878         call->set_req(TypeFunc::Parms, x);


3041 #endif
3042     // platform dependent reshaping of the address expression
3043     reshape_address(n->as_AddP());
3044     break;
3045   }
3046 
3047   case Op_CastPP: {
3048     // Remove CastPP nodes to gain more freedom during scheduling but
3049     // keep the dependency they encode as control or precedence edges
3050     // (if control is set already) on memory operations. Some CastPP
3051     // nodes don't have a control (don't carry a dependency): skip
3052     // those.
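         // Typically the CastPP's control is a preceding null check; the walk
         // below follows its users through address and pointer-cast nodes
         // (AddP, DecodeN, CheckCastPP, ...) so that every memory operation
         // depending on the cast keeps a control or precedence edge to that
         // check and cannot be scheduled above it once the CastPP is gone.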
3053     if (n->in(0) != NULL) {
3054       ResourceMark rm;
3055       Unique_Node_List wq;
3056       wq.push(n);
3057       for (uint next = 0; next < wq.size(); ++next) {
3058         Node *m = wq.at(next);
3059         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
3060           Node* use = m->fast_out(i);
3061           if (use->is_Mem() || use->is_EncodeNarrowPtr() || use->is_ShenandoahBarrier()) {
3062             use->ensure_control_or_add_prec(n->in(0));
3063           } else {
3064             switch(use->Opcode()) {
3065             case Op_AddP:
3066             case Op_DecodeN:
3067             case Op_DecodeNKlass:
3068             case Op_CheckCastPP:
3069             case Op_CastPP:
3070               wq.push(use);
3071               break;
3072             }
3073           }
3074         }
3075       }
3076     }
3077     const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false);
3078     if (is_LP64 && n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
3079       Node* in1 = n->in(1);
3080       const Type* t = n->bottom_type();
3081       Node* new_in1 = in1->clone();


3377         }
3378       } else {
3379         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
3380           Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
3381           n->set_req(2, shift);
3382         }
3383       }
3384       if (in2->outcnt() == 0) { // Remove dead node
3385         in2->disconnect_inputs(NULL, this);
3386       }
3387     }
3388     break;
3389   case Op_MemBarStoreStore:
3390   case Op_MemBarRelease:
3391     // Break the link with AllocateNode: it is no longer useful and
3392     // confuses register allocation.
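         // The Precedent edge was set when the barrier was emitted for the
         // publication of a freshly allocated object, so that earlier phases
         // (e.g. escape analysis) could elide the barrier when the allocation
         // does not escape; by now those decisions have been made and the edge
         // only adds constraints.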
3393     if (n->req() > MemBarNode::Precedent) {
3394       n->set_req(MemBarNode::Precedent, top());
3395     }
3396     break;
3397 #if INCLUDE_SHENANDOAHGC
3398   case Op_ShenandoahReadBarrier:
3399     break;
3400   case Op_ShenandoahWriteBarrier:
3401     assert(false, "should have been expanded already");
3402     break;
3403 #endif
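       // A RangeCheckNode is just an IfNode that loop optimizations (range
       // check elimination in particular) can recognize; once loop opts are
       // over the distinction no longer matters, so it is lowered back to a
       // plain If before matching.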
3404   case Op_RangeCheck: {
3405     RangeCheckNode* rc = n->as_RangeCheck();
3406     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3407     n->subsume_by(iff, this);
3408     frc._tests.push(iff);
3409     break;
3410   }
3411   case Op_ConvI2L: {
3412     if (!Matcher::convi2l_type_required) {
3413       // Code generation on some platforms doesn't need accurate
3414       // ConvI2L types. Widening the type can help remove redundant
3415       // address computations.
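           // For instance, two ConvI2L nodes for the same index that differ
           // only in the narrow type derived from surrounding range checks
           // become identical once widened to TypeLong::INT, so GVN can common
           // them and fold the duplicated address arithmetic.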
3416       n->as_Type()->set_type(TypeLong::INT);
3417       ResourceMark rm;
3418       Node_List wq;
3419       wq.push(n);
3420       for (uint next = 0; next < wq.size(); next++) {
3421         Node *m = wq.at(next);
3422 
3423         for(;;) {


3821           if (use->is_Con())        continue;  // a dead ConNode is OK
3822           // At this point, we have found a dead node which is DU-reachable.
3823           if (!dead_nodes) {
3824             tty->print_cr("*** Dead nodes reachable via DU edges:");
3825             dead_nodes = true;
3826           }
3827           use->dump(2);
3828           tty->print_cr("---");
3829           checked.push(use);  // No repeats; pretend it is now checked.
3830         }
3831       }
3832       assert(!dead_nodes, "using nodes must be reachable from root");
3833     }
3834   }
3835 }
3836 
3837 // Verify GC barrier consistency
3838 // Currently supported:
3839 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
3840 void Compile::verify_barriers() {
3841 #if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
3842   if (UseG1GC || UseShenandoahGC) {
3843     // Verify G1 pre-barriers
3844 
3845 #if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
3846     const int marking_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
3847                                                 : ShenandoahThreadLocalData::satb_mark_queue_active_offset());
3848 #elif INCLUDE_G1GC
3849     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
3850 #else
3851     const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
3852 #endif
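         // marking_offset is the thread-local offset of the SATB "marking
         // active" flag that the emitted pre-barriers test before recording
         // the previous value; the graph walk below looks for loads of that
         // flag to check that the pre-barrier code still has the expected shape.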
3853 
3854     ResourceArea *area = Thread::current()->resource_area();
3855     Unique_Node_List visited(area);
3856     Node_List worklist(area);
3857     // We're going to walk control flow backwards starting from the Root
3858     worklist.push(_root);
3859     while (worklist.size() > 0) {
3860       Node* x = worklist.pop();
3861       if (x == NULL || x == top()) continue;
3862       if (visited.member(x)) {
3863         continue;
3864       } else {
3865         visited.push(x);
3866       }
3867 
3868       if (x->is_Region()) {
3869         for (uint i = 1; i < x->req(); i++) {
3870           worklist.push(x->in(i));
3871         }
3872       } else {


4588 
4589 /**
4590  * Remove the speculative part of types and clean up the graph
4591  */
4592 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
4593   if (UseTypeSpeculation) {
4594     Unique_Node_List worklist;
4595     worklist.push(root());
4596     int modified = 0;
4597     // Go over all type nodes that carry a speculative type, drop the
4598     // speculative part of the type and enqueue the node for an igvn
4599     // which may optimize it out.
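         // A speculative type is a profile-based guess attached to a type,
         // e.g. an argument declared java.lang.Object whose profile says it is
         // always a java.lang.String; once the optimizer can no longer act on
         // such guesses they only bloat the types, so they are dropped here.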
4600     for (uint next = 0; next < worklist.size(); ++next) {
4601       Node *n  = worklist.at(next);
4602       if (n->is_Type()) {
4603         TypeNode* tn = n->as_Type();
4604         const Type* t = tn->type();
4605         const Type* t_no_spec = t->remove_speculative();
4606         if (t_no_spec != t) {
4607           bool in_hash = igvn.hash_delete(n);
4608           assert(in_hash || (UseShenandoahGC && n->hash() == Node::NO_HASH), "node should be in igvn hash table");
4609           tn->set_type(t_no_spec);
4610           igvn.hash_insert(n);
4611           igvn._worklist.push(n); // give it a chance to go away
4612           modified++;
4613         }
4614       }
4615       uint max = n->len();
4616       for( uint i = 0; i < max; ++i ) {
4617         Node *m = n->in(i);
4618         if (not_a_node(m))  continue;
4619         worklist.push(m);
4620       }
4621     }
4622     // Drop the speculative part of all types in the igvn's type table
4623     igvn.remove_speculative_types();
4624     if (modified > 0) {
4625       igvn.optimize();
4626     }
4627 #ifdef ASSERT
4628     // Verify that after the IGVN is over no speculative type has resurfaced

