
src/hotspot/share/opto/compile.cpp


  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/c2/barrierSetC2.hpp"
  42 #include "jfr/jfrEvents.hpp"
  43 #include "jvm_io.h"
  44 #include "memory/allocation.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "opto/addnode.hpp"
  47 #include "opto/block.hpp"
  48 #include "opto/c2compiler.hpp"
  49 #include "opto/callGenerator.hpp"
  50 #include "opto/callnode.hpp"
  51 #include "opto/castnode.hpp"
  52 #include "opto/cfgnode.hpp"
  53 #include "opto/chaitin.hpp"
  54 #include "opto/compile.hpp"
  55 #include "opto/connode.hpp"
  56 #include "opto/convertnode.hpp"
  57 #include "opto/divnode.hpp"
  58 #include "opto/escape.hpp"
  59 #include "opto/idealGraphPrinter.hpp"

  60 #include "opto/locknode.hpp"
  61 #include "opto/loopnode.hpp"
  62 #include "opto/machnode.hpp"
  63 #include "opto/macro.hpp"
  64 #include "opto/matcher.hpp"
  65 #include "opto/mathexactnode.hpp"
  66 #include "opto/memnode.hpp"
  67 #include "opto/mulnode.hpp"
  68 #include "opto/narrowptrnode.hpp"
  69 #include "opto/node.hpp"
  70 #include "opto/opcodes.hpp"
  71 #include "opto/output.hpp"
  72 #include "opto/parse.hpp"
  73 #include "opto/phaseX.hpp"
  74 #include "opto/rootnode.hpp"
  75 #include "opto/runtime.hpp"
  76 #include "opto/stringopts.hpp"
  77 #include "opto/type.hpp"
  78 #include "opto/vector.hpp"
  79 #include "opto/vectornode.hpp"

 385   // as dead to be conservative about the dead node count at any
 386   // given time.
 387   if (!dead->is_Con()) {
 388     record_dead_node(dead->_idx);
 389   }
 390   if (dead->is_macro()) {
 391     remove_macro_node(dead);
 392   }
 393   if (dead->is_expensive()) {
 394     remove_expensive_node(dead);
 395   }
 396   if (dead->Opcode() == Op_Opaque4) {
 397     remove_template_assertion_predicate_opaq(dead);
 398   }
 399   if (dead->is_ParsePredicate()) {
 400     remove_parse_predicate(dead->as_ParsePredicate());
 401   }
 402   if (dead->for_post_loop_opts_igvn()) {
 403     remove_from_post_loop_opts_igvn(dead);
 404   }



 405   if (dead->is_Call()) {
 406     remove_useless_late_inlines(                &_late_inlines, dead);
 407     remove_useless_late_inlines(         &_string_late_inlines, dead);
 408     remove_useless_late_inlines(         &_boxing_late_inlines, dead);
 409     remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
 410 
 411     if (dead->is_CallStaticJava()) {
 412       remove_unstable_if_trap(dead->as_CallStaticJava(), false);
 413     }
 414   }
 415   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 416   bs->unregister_potential_barrier_node(dead);
 417 }
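// Note: every side list that can still reference the dead node -- macro nodes,
// expensive nodes, template assertion predicate opaques, parse predicates,
// post-loop-opts IGVN candidates, late-inline lists, unstable-if traps and the
// barrier set's bookkeeping -- is purged above so that later phases never see
// a stale entry.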
 418 
 419 // Disconnect all useless nodes by disconnecting those at the boundary.
 420 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist) {
 421   uint next = 0;
 422   while (next < useful.size()) {
 423     Node *n = useful.at(next++);
 424     if (n->is_SafePoint()) {

 426       // beyond that point.
 427       n->as_SafePoint()->delete_replaced_nodes();
 428     }
 429     // Use raw traversal of out edges since this code removes out edges
 430     int max = n->outcnt();
 431     for (int j = 0; j < max; ++j) {
 432       Node* child = n->raw_out(j);
 433       if (!useful.member(child)) {
 434         assert(!child->is_top() || child != top(),
 435                "If top is cached in Compile object it is in useful list");
 436         // Only need to remove this out-edge to the useless node
 437         n->raw_del_out(j);
 438         --j;
 439         --max;
 440       }
 441     }
 442     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 443       assert(useful.member(n->unique_out()), "do not push a useless node");
 444       worklist.push(n->unique_out());
 445     }



 446   }
 447 
 448   remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
 449   remove_useless_nodes(_parse_predicates,   useful); // remove useless Parse Predicate nodes
 450   remove_useless_nodes(_template_assertion_predicate_opaqs, useful); // remove useless Assertion Predicate opaque nodes
 451   remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
 452   remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass






 453   remove_useless_unstable_if_traps(useful);          // remove useless unstable_if traps
 454   remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
 455 #ifdef ASSERT
 456   if (_modified_nodes != nullptr) {
 457     _modified_nodes->remove_useless_nodes(useful.member_set());
 458   }
 459 #endif
 460 
 461   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 462   bs->eliminate_useless_gc_barriers(useful, this);
 463   // clean up the late inline lists
 464   remove_useless_late_inlines(                &_late_inlines, useful);
 465   remove_useless_late_inlines(         &_string_late_inlines, useful);
 466   remove_useless_late_inlines(         &_boxing_late_inlines, useful);
 467   remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
 468   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 469 }
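// Note: disconnect_useless_nodes() only cuts the out-edges that cross from
// useful nodes into the useless part of the graph, using raw out-edge
// traversal because edges are deleted while iterating. A node left with a
// single "special" unique user is pushed onto the worklist so IGVN revisits it.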
 470 
 471 // ============================================================================
 472 //------------------------------CompileWrapper---------------------------------

 612 
 613 
 614 Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
 615                   Options options, DirectiveSet* directive)
 616                 : Phase(Compiler),
 617                   _compile_id(ci_env->compile_id()),
 618                   _options(options),
 619                   _method(target),
 620                   _entry_bci(osr_bci),
 621                   _ilt(nullptr),
 622                   _stub_function(nullptr),
 623                   _stub_name(nullptr),
 624                   _stub_entry_point(nullptr),
 625                   _max_node_limit(MaxNodeLimit),
 626                   _post_loop_opts_phase(false),
 627                   _allow_macro_nodes(true),
 628                   _inlining_progress(false),
 629                   _inlining_incrementally(false),
 630                   _do_cleanup(false),
 631                   _has_reserved_stack_access(target->has_reserved_stack_access()),

 632 #ifndef PRODUCT
 633                   _igv_idx(0),
 634                   _trace_opto_output(directive->TraceOptoOutputOption),
 635 #endif
 636                   _has_method_handle_invokes(false),
 637                   _clinit_barrier_on_entry(false),
 638                   _stress_seed(0),
 639                   _comp_arena(mtCompiler),
 640                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 641                   _env(ci_env),
 642                   _directive(directive),
 643                   _log(ci_env->log()),
 644                   _first_failure_details(nullptr),
 645                   _intrinsics        (comp_arena(), 0, 0, nullptr),
 646                   _macro_nodes       (comp_arena(), 8, 0, nullptr),
 647                   _parse_predicates  (comp_arena(), 8, 0, nullptr),
 648                   _template_assertion_predicate_opaqs (comp_arena(), 8, 0, nullptr),
 649                   _expensive_nodes   (comp_arena(), 8, 0, nullptr),
 650                   _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),

 651                   _unstable_if_traps (comp_arena(), 8, 0, nullptr),
 652                   _coarsened_locks   (comp_arena(), 8, 0, nullptr),
 653                   _congraph(nullptr),
 654                   NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 655                   _unique(0),
 656                   _dead_node_count(0),
 657                   _dead_node_list(comp_arena()),
 658                   _node_arena_one(mtCompiler, Arena::Tag::tag_node),
 659                   _node_arena_two(mtCompiler, Arena::Tag::tag_node),
 660                   _node_arena(&_node_arena_one),
 661                   _mach_constant_base_node(nullptr),
 662                   _Compile_types(mtCompiler),
 663                   _initial_gvn(nullptr),
 664                   _igvn_worklist(nullptr),
 665                   _types(nullptr),
 666                   _node_hash(nullptr),
 667                   _late_inlines(comp_arena(), 2, 0, nullptr),
 668                   _string_late_inlines(comp_arena(), 2, 0, nullptr),
 669                   _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
 670                   _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr),

 733 
 734   // GVN that will be run immediately on new nodes
 735   uint estimated_size = method()->code_size()*4+64;
 736   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 737   _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
 738   _types = new (comp_arena()) Type_Array(comp_arena());
 739   _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
 740   PhaseGVN gvn;
 741   set_initial_gvn(&gvn);
 742 
 743   print_inlining_init();
 744   { // Scope for timing the parser
 745     TracePhase tp("parse", &timers[_t_parser]);
 746 
 747     // Put top into the hash table ASAP.
 748     initial_gvn()->transform(top());
 749 
 750     // Set up tf(), start(), and find a CallGenerator.
 751     CallGenerator* cg = nullptr;
 752     if (is_osr_compilation()) {
 753       const TypeTuple *domain = StartOSRNode::osr_domain();
 754       const TypeTuple *range = TypeTuple::make_range(method()->signature());
 755       init_tf(TypeFunc::make(domain, range));
 756       StartNode* s = new StartOSRNode(root(), domain);
 757       initial_gvn()->set_type_bottom(s);
 758       init_start(s);
 759       cg = CallGenerator::for_osr(method(), entry_bci());
 760     } else {
 761       // Normal case.
 762       init_tf(TypeFunc::make(method()));
 763       StartNode* s = new StartNode(root(), tf()->domain());
 764       initial_gvn()->set_type_bottom(s);
 765       init_start(s);
 766       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 767         // With java.lang.ref.Reference.get() we must go through the
 768         // intrinsic - even when get() is the root
 769         // method of the compile - so that, if necessary, the value in
 770         // the referent field of the reference object gets recorded by
 771         // the pre-barrier code.
 772         cg = find_intrinsic(method(), false);
 773       }
 774       if (cg == nullptr) {
 775         float past_uses = method()->interpreter_invocation_count();
 776         float expected_uses = past_uses;
 777         cg = CallGenerator::for_inline(method(), expected_uses);
 778       }
 779     }
 780     if (failing())  return;
 781     if (cg == nullptr) {
 782       const char* reason = InlineTree::check_can_parse(method());
 783       assert(reason != nullptr, "expect reason for parse failure");

 859     print_ideal_ir("print_ideal");
 860   }
 861 #endif
 862 
 863 #ifdef ASSERT
 864   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 865   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 866 #endif
 867 
 868   // Dump compilation data to replay it.
 869   if (directive->DumpReplayOption) {
 870     env()->dump_replay_data(_compile_id);
 871   }
 872   if (directive->DumpInlineOption && (ilt() != nullptr)) {
 873     env()->dump_inline_data(_compile_id);
 874   }
 875 
 876   // Now that we know the size of all the monitors we can add a fixed slot
 877   // for the original deopt pc.
 878   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);










 879   set_fixed_slots(next_slot);
 880 
 881   // Compute when to use implicit null checks. Used by matching trap based
 882   // nodes and NullCheck optimization.
 883   set_allowed_deopt_reasons();
 884 
 885   // Now generate code
 886   Code_Gen();
 887 }
 888 
 889 //------------------------------Compile----------------------------------------
 890 // Compile a runtime stub
 891 Compile::Compile( ciEnv* ci_env,
 892                   TypeFunc_generator generator,
 893                   address stub_function,
 894                   const char *stub_name,
 895                   int is_fancy_jump,
 896                   bool pass_tls,
 897                   bool return_pc,
 898                   DirectiveSet* directive)
 899   : Phase(Compiler),
 900     _compile_id(0),
 901     _options(Options::for_runtime_stub()),
 902     _method(nullptr),
 903     _entry_bci(InvocationEntryBci),
 904     _stub_function(stub_function),
 905     _stub_name(stub_name),
 906     _stub_entry_point(nullptr),
 907     _max_node_limit(MaxNodeLimit),
 908     _post_loop_opts_phase(false),
 909     _allow_macro_nodes(true),
 910     _inlining_progress(false),
 911     _inlining_incrementally(false),
 912     _has_reserved_stack_access(false),

 913 #ifndef PRODUCT
 914     _igv_idx(0),
 915     _trace_opto_output(directive->TraceOptoOutputOption),
 916 #endif
 917     _has_method_handle_invokes(false),
 918     _clinit_barrier_on_entry(false),
 919     _stress_seed(0),
 920     _comp_arena(mtCompiler),
 921     _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 922     _env(ci_env),
 923     _directive(directive),
 924     _log(ci_env->log()),
 925     _first_failure_details(nullptr),
 926     _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 927     _congraph(nullptr),
 928     NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 929     _unique(0),
 930     _dead_node_count(0),
 931     _dead_node_list(comp_arena()),
 932     _node_arena_one(mtCompiler),

1038 
1039   _fixed_slots = 0;
1040   set_has_split_ifs(false);
1041   set_has_loops(false); // first approximation
1042   set_has_stringbuilder(false);
1043   set_has_boxed_value(false);
1044   _trap_can_recompile = false;  // no traps emitted yet
1045   _major_progress = true; // start out assuming good things will happen
1046   set_has_unsafe_access(false);
1047   set_max_vector_size(0);
 1048   set_clear_upper_avx(false);  // default: do not clear the upper bits of ymm registers
1049   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1050   set_decompile_count(0);
1051 
1052 #ifndef PRODUCT
1053   Copy::zero_to_bytes(_igv_phase_iter, sizeof(_igv_phase_iter));
1054 #endif
1055 
1056   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1057   _loop_opts_cnt = LoopOptsCount;




1058   set_do_inlining(Inline);
1059   set_max_inline_size(MaxInlineSize);
1060   set_freq_inline_size(FreqInlineSize);
1061   set_do_scheduling(OptoScheduling);
1062 
1063   set_do_vector_loop(false);
1064   set_has_monitors(false);
1065 
1066   if (AllowVectorizeOnDemand) {
1067     if (has_method() && _directive->VectorizeOption) {
1068       set_do_vector_loop(true);
1069       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1070     } else if (has_method() && method()->name() != 0 &&
1071                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1072       set_do_vector_loop(true);
1073     }
1074   }
 1075   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider having do_vector_loop() mandate use_cmove unconditionally
1076   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1077 

1326   // If this method has already thrown a range-check,
1327   // assume it was because we already tried range smearing
1328   // and it failed.
1329   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1330   return !already_trapped;
1331 }
1332 
1333 
1334 //------------------------------flatten_alias_type-----------------------------
1335 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1336   assert(do_aliasing(), "Aliasing should be enabled");
1337   int offset = tj->offset();
1338   TypePtr::PTR ptr = tj->ptr();
1339 
1340   // Known instance (scalarizable allocation) alias only with itself.
1341   bool is_known_inst = tj->isa_oopptr() != nullptr &&
1342                        tj->is_oopptr()->is_known_instance();
1343 
1344   // Process weird unsafe references.
1345   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1346     assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");

1347     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1348     tj = TypeOopPtr::BOTTOM;
1349     ptr = tj->ptr();
1350     offset = tj->offset();
1351   }
1352 
1353   // Array pointers need some flattening
1354   const TypeAryPtr* ta = tj->isa_aryptr();
1355   if (ta && ta->is_stable()) {
1356     // Erase stability property for alias analysis.
1357     tj = ta = ta->cast_to_stable(false);
1358   }









1359   if( ta && is_known_inst ) {
1360     if ( offset != Type::OffsetBot &&
1361          offset > arrayOopDesc::length_offset_in_bytes() ) {
1362       offset = Type::OffsetBot; // Flatten constant access into array body only
1363       tj = ta = ta->
1364               remove_speculative()->
1365               cast_to_ptr_type(ptr)->
1366               with_offset(offset);
1367     }
1368   } else if (ta) {
1369     // For arrays indexed by constant indices, we flatten the alias
1370     // space to include all of the array body.  Only the header, klass
1371     // and array length can be accessed un-aliased.


1372     if( offset != Type::OffsetBot ) {
1373       if( ta->const_oop() ) { // MethodData* or Method*
1374         offset = Type::OffsetBot;   // Flatten constant access into array body
1375         tj = ta = ta->
1376                 remove_speculative()->
1377                 cast_to_ptr_type(ptr)->
1378                 cast_to_exactness(false)->
1379                 with_offset(offset);
1380       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1381         // range is OK as-is.
1382         tj = ta = TypeAryPtr::RANGE;
1383       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1384         tj = TypeInstPtr::KLASS; // all klass loads look alike
1385         ta = TypeAryPtr::RANGE; // generic ignored junk
1386         ptr = TypePtr::BotPTR;
1387       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1388         tj = TypeInstPtr::MARK;
1389         ta = TypeAryPtr::RANGE; // generic ignored junk
1390         ptr = TypePtr::BotPTR;
1391       } else {                  // Random constant offset into array body
1392         offset = Type::OffsetBot;   // Flatten constant access into array body
1393         tj = ta = ta->
1394                 remove_speculative()->
1395                 cast_to_ptr_type(ptr)->
1396                 cast_to_exactness(false)->
1397                 with_offset(offset);
1398       }
1399     }
1400     // Arrays of fixed size alias with arrays of unknown size.
1401     if (ta->size() != TypeInt::POS) {
1402       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1403       tj = ta = ta->
1404               remove_speculative()->
1405               cast_to_ptr_type(ptr)->
1406               with_ary(tary)->
1407               cast_to_exactness(false);
1408     }
1409     // Arrays of known objects become arrays of unknown objects.
1410     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1411       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1412       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);
1413     }
1414     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1415       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1416       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);





1417     }
1418     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1419     // cannot be distinguished by bytecode alone.
1420     if (ta->elem() == TypeInt::BOOL) {
1421       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1422       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1423       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1424     }
1425     // During the 2nd round of IterGVN, NotNull castings are removed.
1426     // Make sure the Bottom and NotNull variants alias the same.
1427     // Also, make sure exact and non-exact variants alias the same.
1428     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) {
1429       tj = ta = ta->
1430               remove_speculative()->
1431               cast_to_ptr_type(TypePtr::BotPTR)->
1432               cast_to_exactness(false)->
1433               with_offset(offset);
1434     }
1435   }
1436 
1437   // Oop pointers need some flattening
1438   const TypeInstPtr *to = tj->isa_instptr();
1439   if (to && to != TypeOopPtr::BOTTOM) {
1440     ciInstanceKlass* ik = to->instance_klass();
1441     if( ptr == TypePtr::Constant ) {
1442       if (ik != ciEnv::current()->Class_klass() ||
1443           offset < ik->layout_helper_size_in_bytes()) {

1453     } else if( is_known_inst ) {
1454       tj = to; // Keep NotNull and klass_is_exact for instance type
1455     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1456       // During the 2nd round of IterGVN, NotNull castings are removed.
1457       // Make sure the Bottom and NotNull variants alias the same.
1458       // Also, make sure exact and non-exact variants alias the same.
1459       tj = to = to->
1460               remove_speculative()->
1461               cast_to_instance_id(TypeOopPtr::InstanceBot)->
1462               cast_to_ptr_type(TypePtr::BotPTR)->
1463               cast_to_exactness(false);
1464     }
1465     if (to->speculative() != nullptr) {
1466       tj = to = to->remove_speculative();
1467     }
1468     // Canonicalize the holder of this field
1469     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1470       // First handle header references such as a LoadKlassNode, even if the
1471       // object's klass is unloaded at compile time (4965979).
1472       if (!is_known_inst) { // Do it only for non-instance types
1473         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, offset);
1474       }
1475     } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1476       // Static fields are in the space above the normal instance
1477       // fields in the java.lang.Class instance.
1478       if (ik != ciEnv::current()->Class_klass()) {
1479         to = nullptr;
1480         tj = TypeOopPtr::BOTTOM;
1481         offset = tj->offset();
1482       }
1483     } else {
1484       ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1485       assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1486       if (!ik->equals(canonical_holder) || tj->offset() != offset) {
1487         if( is_known_inst ) {
1488           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, nullptr, offset, to->instance_id());
1489         } else {
1490           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, nullptr, offset);
1491         }
1492       }
1493     }
1494   }
1495 
1496   // Klass pointers to object array klasses need some flattening
1497   const TypeKlassPtr *tk = tj->isa_klassptr();
1498   if( tk ) {
1499     // If we are referencing a field within a Klass, we need
1500     // to assume the worst case of an Object.  Both exact and
1501     // inexact types must flatten to the same alias class so
1502     // use NotNull as the PTR.
1503     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1504       tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1505                                        env()->Object_klass(),
1506                                        offset);
1507     }
1508 
1509     if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1510       ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1511       if (!k || !k->is_loaded()) {                  // Only fails for some -Xcomp runs
1512         tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), offset);
1513       } else {
1514         tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, offset);
1515       }
1516     }
1517 
1518     // Check for precise loads from the primary supertype array and force them
1519     // to the supertype cache alias index.  Check for generic array loads from
1520     // the primary supertype array and also force them to the supertype cache
1521     // alias index.  Since the same load can reach both, we need to merge
1522     // these 2 disparate memories into the same alias class.  Since the
1523     // primary supertype array is read-only, there's no chance of confusion
1524     // where we bypass an array load and an array store.
1525     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1526     if (offset == Type::OffsetBot ||
1527         (offset >= primary_supers_offset &&
1528          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1529         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1530       offset = in_bytes(Klass::secondary_super_cache_offset());
1531       tj = tk = tk->with_offset(offset);
1532     }
1533   }
1534 
1535   // Flatten all Raw pointers together.
1536   if (tj->base() == Type::RawPtr)
1537     tj = TypeRawPtr::BOTTOM;

1627   intptr_t key = (intptr_t) adr_type;
1628   key ^= key >> logAliasCacheSize;
1629   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1630 }
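// Note: the alias cache probed above is a small direct-mapped table. The
// address-type pointer is folded onto itself (key ^= key >> logAliasCacheSize)
// and the low logAliasCacheSize bits select the slot; on a miss, the slow path
// in find_alias_type() recomputes the entry and overwrites the slot.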
1631 
1632 
1633 //-----------------------------grow_alias_types--------------------------------
1634 void Compile::grow_alias_types() {
1635   const int old_ats  = _max_alias_types; // how many before?
1636   const int new_ats  = old_ats;          // how many more?
1637   const int grow_ats = old_ats+new_ats;  // how many now?
1638   _max_alias_types = grow_ats;
1639   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1640   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1641   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1642   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1643 }
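// Note: since new_ats == old_ats, each call to grow_alias_types() doubles the
// capacity of the _alias_types table. Only the pointer array is reallocated;
// the new AliasType objects are freshly arena-allocated and zero-filled.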
1644 
1645 
1646 //--------------------------------find_alias_type------------------------------
1647 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1648   if (!do_aliasing()) {
1649     return alias_type(AliasIdxBot);
1650   }
1651 
1652   AliasCacheEntry* ace = probe_alias_cache(adr_type);
1653   if (ace->_adr_type == adr_type) {
1654     return alias_type(ace->_index);



1655   }
1656 
1657   // Handle special cases.
1658   if (adr_type == nullptr)          return alias_type(AliasIdxTop);
1659   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1660 
1661   // Do it the slow way.
1662   const TypePtr* flat = flatten_alias_type(adr_type);
1663 
1664 #ifdef ASSERT
1665   {
1666     ResourceMark rm;
1667     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1668            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1669     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1670            Type::str(adr_type));
1671     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1672       const TypeOopPtr* foop = flat->is_oopptr();
1673       // Scalarizable allocations have exact klass always.
1674       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1684     if (alias_type(i)->adr_type() == flat) {
1685       idx = i;
1686       break;
1687     }
1688   }
1689 
1690   if (idx == AliasIdxTop) {
1691     if (no_create)  return nullptr;
1692     // Grow the array if necessary.
1693     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1694     // Add a new alias type.
1695     idx = _num_alias_types++;
1696     _alias_types[idx]->Init(idx, flat);
1697     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1698     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1699     if (flat->isa_instptr()) {
1700       if (flat->offset() == java_lang_Class::klass_offset()
1701           && flat->is_instptr()->instance_klass() == env()->Class_klass())
1702         alias_type(idx)->set_rewritable(false);
1703     }

1704     if (flat->isa_aryptr()) {
1705 #ifdef ASSERT
1706       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1707       // (T_BYTE has the weakest alignment and size restrictions...)
1708       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1709 #endif

1710       if (flat->offset() == TypePtr::OffsetBot) {
1711         alias_type(idx)->set_element(flat->is_aryptr()->elem());







1712       }
1713     }
1714     if (flat->isa_klassptr()) {
1715       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1716         alias_type(idx)->set_rewritable(false);
1717       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1718         alias_type(idx)->set_rewritable(false);
1719       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1720         alias_type(idx)->set_rewritable(false);
1721       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1722         alias_type(idx)->set_rewritable(false);


1723       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1724         alias_type(idx)->set_rewritable(false);
1725     }
1726     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1727     // but the base pointer type is not distinctive enough to identify
1728     // references into JavaThread.)
1729 
1730     // Check for final fields.
1731     const TypeInstPtr* tinst = flat->isa_instptr();
1732     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1733       ciField* field;
1734       if (tinst->const_oop() != nullptr &&
1735           tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1736           tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1737         // static field
1738         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1739         field = k->get_field_by_offset(tinst->offset(), true);




1740       } else {
1741         ciInstanceKlass *k = tinst->instance_klass();
1742         field = k->get_field_by_offset(tinst->offset(), false);
1743       }
1744       assert(field == nullptr ||
1745              original_field == nullptr ||
1746              (field->holder() == original_field->holder() &&
1747               field->offset_in_bytes() == original_field->offset_in_bytes() &&
1748               field->is_static() == original_field->is_static()), "wrong field?");
1749       // Set field() and is_rewritable() attributes.
1750       if (field != nullptr)  alias_type(idx)->set_field(field);







1751     }
1752   }
1753 
1754   // Fill the cache for next time.
1755   ace->_adr_type = adr_type;
1756   ace->_index    = idx;
1757   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");

1758 
1759   // Might as well try to fill the cache for the flattened version, too.
1760   AliasCacheEntry* face = probe_alias_cache(flat);
1761   if (face->_adr_type == nullptr) {
1762     face->_adr_type = flat;
1763     face->_index    = idx;
1764     assert(alias_type(flat) == alias_type(idx), "flat type must work too");

1765   }
1766 
1767   return alias_type(idx);
1768 }
1769 
1770 
1771 Compile::AliasType* Compile::alias_type(ciField* field) {
1772   const TypeOopPtr* t;
1773   if (field->is_static())
1774     t = TypeInstPtr::make(field->holder()->java_mirror());
1775   else
1776     t = TypeOopPtr::make_from_klass_raw(field->holder());
1777   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1778   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1779   return atp;
1780 }
1781 
1782 
1783 //------------------------------have_alias_type--------------------------------
1784 bool Compile::have_alias_type(const TypePtr* adr_type) {

1864   assert(!C->major_progress(), "not cleared");
1865 
1866   if (_for_post_loop_igvn.length() > 0) {
1867     while (_for_post_loop_igvn.length() > 0) {
1868       Node* n = _for_post_loop_igvn.pop();
1869       n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1870       igvn._worklist.push(n);
1871     }
1872     igvn.optimize();
1873     if (failing()) return;
1874     assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1875     assert(C->parse_predicate_count() == 0, "all parse predicates should have been removed now");
1876 
1877     // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1878     if (C->major_progress()) {
1879       C->clear_major_progress(); // ensure that major progress is now clear
1880     }
1881   }
1882 }
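// Note: nodes recorded in _for_post_loop_igvn carry Flag_for_post_loop_opts_igvn;
// the flag is cleared above before the nodes are pushed back onto the IGVN
// worklist for one more round of optimization once loop opts are over.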
1883 
1884 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
1885   if (OptimizeUnstableIf) {
1886     _unstable_if_traps.append(trap);
1887   }
1888 }
1889 
1890 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
1891   for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
1892     UnstableIfTrap* trap = _unstable_if_traps.at(i);
1893     Node* n = trap->uncommon_trap();
1894     if (!useful.member(n)) {
1895       _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
1896     }
1897   }
1898 }
1899 
1900 // Remove the unstable if trap associated with 'unc' from candidates. It is either dead
 1901 // or the fold-compares case. Return true on success or if the trap is not found.
1902 //
1903 // In rare cases, the found trap has been processed. It is too late to delete it. Return

1939       assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
1940       Bytecodes::Code c = iter.cur_bc();
1941       Node* lhs = nullptr;
1942       Node* rhs = nullptr;
1943       if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
1944         lhs = unc->peek_operand(0);
1945         rhs = unc->peek_operand(1);
1946       } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
1947         lhs = unc->peek_operand(0);
1948       }
1949 
1950       ResourceMark rm;
1951       const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
1952       assert(live_locals.is_valid(), "broken liveness info");
1953       int len = (int)live_locals.size();
1954 
1955       for (int i = 0; i < len; i++) {
1956         Node* local = unc->local(jvms, i);
 1957         // Kill the local using the liveness at next_bci.
 1958         // Give up when the local looks like an operand, to keep re-execution safe.
1959         if (!live_locals.at(i) && !local->is_top() && local != lhs && local!= rhs) {
1960           uint idx = jvms->locoff() + i;
1961 #ifdef ASSERT
1962           if (PrintOpto && Verbose) {
1963             tty->print("[unstable_if] kill local#%d: ", idx);
1964             local->dump();
1965             tty->cr();
1966           }
1967 #endif
1968           igvn.replace_input_of(unc, idx, top());
1969           modified = true;
1970         }
1971       }
1972     }
1973 
 1974     // keep the modified trap for a late query
1975     if (modified) {
1976       trap->set_modified();
1977     } else {
1978       _unstable_if_traps.delete_at(i);
1979     }
1980   }
1981   igvn.optimize();
1982 }
1983 
1984 // StringOpts and late inlining of string methods
1985 void Compile::inline_string_calls(bool parse_time) {
1986   {
1987     // remove useless nodes to make the usage analysis simpler
1988     ResourceMark rm;
1989     PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
1990   }
1991 
1992   {
1993     ResourceMark rm;
1994     print_method(PHASE_BEFORE_STRINGOPTS, 3);

2149 
2150   if (_string_late_inlines.length() > 0) {
2151     assert(has_stringbuilder(), "inconsistent");
2152 
2153     inline_string_calls(false);
2154 
2155     if (failing())  return;
2156 
2157     inline_incrementally_cleanup(igvn);
2158   }
2159 
2160   set_inlining_incrementally(false);
2161 }
2162 
2163 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2164   // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2165   // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2166   // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr"
2167   // as if "inlining_incrementally() == true" were set.
2168   assert(inlining_incrementally() == false, "not allowed");
2169   assert(_modified_nodes == nullptr, "not allowed");



2170   assert(_late_inlines.length() > 0, "sanity");
2171 
2172   while (_late_inlines.length() > 0) {
2173     igvn_worklist()->ensure_empty(); // should be done with igvn
2174 
2175     while (inline_incrementally_one()) {
2176       assert(!failing(), "inconsistent");
2177     }
2178     if (failing())  return;
2179 
2180     inline_incrementally_cleanup(igvn);
2181   }

2182 }
2183 
2184 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2185   if (_loop_opts_cnt > 0) {
2186     while (major_progress() && (_loop_opts_cnt > 0)) {
2187       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2188       PhaseIdealLoop::optimize(igvn, mode);
2189       _loop_opts_cnt--;
2190       if (failing())  return false;
2191       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2192     }
2193   }
2194   return true;
2195 }
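// Note: optimize_loops() keeps re-running PhaseIdealLoop while it reports
// major progress, bounded by _loop_opts_cnt (initialized from LoopOptsCount).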
2196 
2197 // Remove edges from "root" to each SafePoint at a backward branch.
2198 // They were inserted during parsing (see add_safepoint()) to make
2199 // infinite loops without calls or exceptions visible to root, i.e.,
2200 // useful.
2201 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {

2308     print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2309   }
2310   assert(!has_vbox_nodes(), "sanity");
2311 
2312   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2313     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2314     igvn_worklist()->ensure_empty(); // should be done with igvn
2315     {
2316       ResourceMark rm;
2317       PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
2318     }
2319     igvn.reset_from_gvn(initial_gvn());
2320     igvn.optimize();
2321     if (failing()) return;
2322   }
2323 
2324   // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2325   // safepoints
2326   remove_root_to_sfpts_edges(igvn);
2327 





2328   if (failing())  return;
2329 
2330   // Perform escape analysis
2331   if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2332     if (has_loops()) {
2333       // Cleanup graph (remove dead nodes).
2334       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2335       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2336       if (failing())  return;
2337     }
2338     bool progress;
2339     print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2340     do {
2341       ConnectionGraph::do_analysis(this, &igvn);
2342 
2343       if (failing())  return;
2344 
2345       int mcount = macro_count(); // Record number of allocations and locks before IGVN
2346 
2347       // Optimize out fields loads from scalar replaceable allocations.

2431   if (failing())  return;
2432 
2433   // Loop transforms on the ideal graph.  Range Check Elimination,
2434   // peeling, unrolling, etc.
2435   if (!optimize_loops(igvn, LoopOptsDefault)) {
2436     return;
2437   }
2438 
2439   if (failing())  return;
2440 
2441   C->clear_major_progress(); // ensure that major progress is now clear
2442 
2443   process_for_post_loop_opts_igvn(igvn);
2444 
2445   if (failing())  return;
2446 
2447 #ifdef ASSERT
2448   bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2449 #endif
2450 








2451   {
2452     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2453     print_method(PHASE_BEFORE_MACRO_EXPANSION, 3);
2454     PhaseMacroExpand  mex(igvn);
2455     if (mex.expand_macro_nodes()) {
2456       assert(failing(), "must bail out w/ explicit message");
2457       return;
2458     }
2459     print_method(PHASE_AFTER_MACRO_EXPANSION, 2);
2460   }
2461 




2462   {
2463     TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2464     if (bs->expand_barriers(this, igvn)) {
2465       assert(failing(), "must bail out w/ explicit message");
2466       return;
2467     }
2468     print_method(PHASE_BARRIER_EXPANSION, 2);
2469   }
2470 
2471   if (C->max_vector_size() > 0) {
2472     C->optimize_logic_cones(igvn);
2473     igvn.optimize();
2474     if (failing()) return;
2475   }
2476 
2477   DEBUG_ONLY( _modified_nodes = nullptr; )

2478 
2479   assert(igvn._worklist.size() == 0, "not empty");
2480 
2481   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2482 
2483   if (_late_inlines.length() > 0) {
2484     // More opportunities to optimize virtual and MH calls.
2485     // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
2486     process_late_inline_calls_no_inline(igvn);
2487     if (failing())  return;
2488   }
2489  } // (End scope of igvn; run destructor if necessary for asserts.)
2490 
2491  check_no_dead_use();
2492 
2493  process_print_inlining();
2494 
2495  // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
2496  // to remove hashes to unlock nodes for modifications.
2497  C->node_hash()->clear();
2498 
2499  // A method with only infinite loops has no edges entering loops from root
2500  {
2501    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2502    if (final_graph_reshaping()) {
2503      assert(failing(), "must bail out w/ explicit message");
2504      return;
2505    }
2506  }
2507 
2508  print_method(PHASE_OPTIMIZE_FINISHED, 2);

3096             // Accumulate any precedence edges
3097             if (mem->in(i) != nullptr) {
3098               n->add_prec(mem->in(i));
3099             }
3100           }
3101           // Everything above this point has been processed.
3102           done = true;
3103         }
3104         // Eliminate the previous StoreCM
3105         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3106         assert(mem->outcnt() == 0, "should be dead");
3107         mem->disconnect_inputs(this);
3108       } else {
3109         prev = mem;
3110       }
3111       mem = prev->in(MemNode::Memory);
3112     }
3113   }
3114 }
3115 

3116 //------------------------------final_graph_reshaping_impl----------------------
3117 // Implement items 1-5 from final_graph_reshaping below.
3118 void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3119 
3120   if ( n->outcnt() == 0 ) return; // dead node
3121   uint nop = n->Opcode();
3122 
3123   // Check for 2-input instruction with "last use" on right input.
3124   // Swap to left input.  Implements item (2).
3125   if( n->req() == 3 &&          // two-input instruction
3126       n->in(1)->outcnt() > 1 && // left use is NOT a last use
3127       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
3128       n->in(2)->outcnt() == 1 &&// right use IS a last use
3129       !n->in(2)->is_Con() ) {   // right use is not a constant
3130     // Check for commutative opcode
3131     switch( nop ) {
3132     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
3133     case Op_MaxI:  case Op_MaxL:  case Op_MaxF:  case Op_MaxD:
3134     case Op_MinI:  case Op_MinL:  case Op_MinF:  case Op_MinD:
3135     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:

3248       if (n->outcnt() > 1 &&
3249           !n->is_Proj() &&
3250           nop != Op_CreateEx &&
3251           nop != Op_CheckCastPP &&
3252           nop != Op_DecodeN &&
3253           nop != Op_DecodeNKlass &&
3254           !n->is_Mem() &&
3255           !n->is_Phi()) {
3256         Node *x = n->clone();
3257         call->set_req(TypeFunc::Parms, x);
3258       }
3259     }
3260     break;
3261   }
3262 
3263   case Op_StoreCM:
3264     {
3265       // Convert OopStore dependence into precedence edge
3266       Node* prec = n->in(MemNode::OopStore);
3267       n->del_req(MemNode::OopStore);
3268       n->add_prec(prec);















3269       eliminate_redundant_card_marks(n);
3270     }
3271 
3272     // fall through
3273 
3274   case Op_StoreB:
3275   case Op_StoreC:
3276   case Op_StoreI:
3277   case Op_StoreL:
3278   case Op_CompareAndSwapB:
3279   case Op_CompareAndSwapS:
3280   case Op_CompareAndSwapI:
3281   case Op_CompareAndSwapL:
3282   case Op_CompareAndSwapP:
3283   case Op_CompareAndSwapN:
3284   case Op_WeakCompareAndSwapB:
3285   case Op_WeakCompareAndSwapS:
3286   case Op_WeakCompareAndSwapI:
3287   case Op_WeakCompareAndSwapL:
3288   case Op_WeakCompareAndSwapP:

3869           // Replace all nodes with identical edges as m with m
3870           k->subsume_by(m, this);
3871         }
3872       }
3873     }
3874     break;
3875   }
3876   case Op_CmpUL: {
3877     if (!Matcher::has_match_rule(Op_CmpUL)) {
3878       // No support for unsigned long comparisons
3879       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3880       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3881       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3882       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3883       Node* andl = new AndLNode(orl, remove_sign_mask);
3884       Node* cmp = new CmpLNode(andl, n->in(2));
3885       n->subsume_by(cmp, this);
3886     }
3887     break;
3888   }







3889   default:
3890     assert(!n->is_Call(), "");
3891     assert(!n->is_Mem(), "");
3892     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3893     break;
3894   }
3895 }
3896 
3897 //------------------------------final_graph_reshaping_walk---------------------
3898 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3899 // requires that the walk visits a node's inputs before visiting the node.
3900 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3901   Unique_Node_List sfpt;
3902 
3903   frc._visited.set(root->_idx); // first, mark node as visited
3904   uint cnt = root->req();
3905   Node *n = root;
3906   uint  i = 0;
3907   while (true) {
3908     if (i < cnt) {

4248   }
4249 }
4250 
4251 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4252   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4253 }
4254 
4255 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4256   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4257 }
4258 
4259 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4260   if (holder->is_initialized()) {
4261     return false;
4262   }
4263   if (holder->is_being_initialized()) {
4264     if (accessing_method->holder() == holder) {
 4265       // Access inside a class. The barrier can be elided when access happens in <clinit>,
 4266       // <init>, or a static method. In all those cases, an initialization
 4267       // barrier on the holder klass has already been passed.
4268       if (accessing_method->is_static_initializer() ||
4269           accessing_method->is_object_initializer() ||
4270           accessing_method->is_static()) {
4271         return false;
4272       }
4273     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4274       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
 4275       // In case of <init> or a static method, a barrier on the subclass is not enough:
 4276       // a child class can become fully initialized while its parent class is still being initialized.
4277       if (accessing_method->is_static_initializer()) {
4278         return false;
4279       }
4280     }
4281     ciMethod* root = method(); // the root method of compilation
4282     if (root != accessing_method) {
4283       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4284     }
4285   }
4286   return true;
4287 }
4288 
4289 #ifndef PRODUCT
4290 //------------------------------verify_bidirectional_edges---------------------
4291 // For each input edge to a node (ie - for each Use-Def edge), verify that
4292 // there is a corresponding Def-Use edge.
4293 void Compile::verify_bidirectional_edges(Unique_Node_List &visited) {
4294   // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4295   uint stack_size = live_nodes() >> 4;
4296   Node_List nstack(MAX2(stack_size, (uint)OptoNodeListSize));
4297   nstack.push(_root);

4313       if (in != nullptr && !in->is_top()) {
 4314         // Count instances of n among in's out-edges
4315         int cnt = 0;
4316         for (uint idx = 0; idx < in->_outcnt; idx++) {
4317           if (in->_out[idx] == n) {
4318             cnt++;
4319           }
4320         }
4321         assert(cnt > 0, "Failed to find Def-Use edge.");
4322         // Check for duplicate edges
4323         // walk the input array downcounting the input edges to n
4324         for (uint j = 0; j < length; j++) {
4325           if (n->in(j) == in) {
4326             cnt--;
4327           }
4328         }
4329         assert(cnt == 0, "Mismatched edge count.");
4330       } else if (in == nullptr) {
4331         assert(i == 0 || i >= n->req() ||
4332                n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||

4333                (n->is_Unlock() && i == (n->req() - 1)) ||
4334                (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4335               "only region, phi, arraycopy, unlock or membar nodes have null data edges");
4336       } else {
4337         assert(in->is_top(), "sanity");
4338         // Nothing to check.
4339       }
4340     }
4341   }
4342 }
4343 
4344 //------------------------------verify_graph_edges---------------------------
4345 // Walk the Graph and verify that there is a one-to-one correspondence
4346 // between Use-Def edges and Def-Use edges in the graph.
4347 void Compile::verify_graph_edges(bool no_dead_code) {
4348   if (VerifyGraphEdges) {
4349     Unique_Node_List visited;
4350 
4351     // Call graph walk to check edges
4352     verify_bidirectional_edges(visited);
4353     if (no_dead_code) {
4354       // Now make sure that no visited node is used by an unvisited node.
4355       bool dead_nodes = false;

4445 // (1) subklass is already limited to a subtype of superklass => always ok
4446 // (2) subklass does not overlap with superklass => always fail
4447 // (3) superklass has NO subtypes and we can check with a simple compare.
4448 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
4449   if (skip) {
4450     return SSC_full_test;       // Let caller generate the general case.
4451   }
4452 
4453   if (subk->is_java_subtype_of(superk)) {
4454     return SSC_always_true; // (0) and (1)  this test cannot fail
4455   }
4456 
4457   if (!subk->maybe_java_subtype_of(superk)) {
4458     return SSC_always_false; // (2) true path dead; no dynamic test needed
4459   }
4460 
4461   const Type* superelem = superk;
4462   if (superk->isa_aryklassptr()) {
4463     int ignored;
4464     superelem = superk->is_aryklassptr()->base_element_type(ignored);







4465   }
4466 
4467   if (superelem->isa_instklassptr()) {
4468     ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4469     if (!ik->has_subklass()) {
4470       if (!ik->is_final()) {
4471         // Add a dependency if there is a chance of a later subclass.
4472         dependencies()->assert_leaf_type(ik);
4473       }
4474       if (!superk->maybe_java_subtype_of(subk)) {
4475         return SSC_always_false;
4476       }
4477       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
4478     }
4479   } else {
4480     // A primitive array type has no subtypes.
4481     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
4482   }
4483 
4484   return SSC_full_test;

5044       const Type* t = igvn.type_or_null(n);
5045       assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
5046       if (n->is_Type()) {
5047         t = n->as_Type()->type();
5048         assert(t == t->remove_speculative(), "no more speculative types");
5049       }
5050       // Iterate over outs - endless loops is unreachable from below
5051       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5052         Node *m = n->fast_out(i);
5053         if (not_a_node(m)) {
5054           continue;
5055         }
5056         worklist.push(m);
5057       }
5058     }
5059     igvn.check_no_speculative_types();
5060 #endif
5061   }
5062 }
5063 





















5064 // Auxiliary methods to support randomized stressing/fuzzing.
5065 
5066 void Compile::initialize_stress_seed(const DirectiveSet* directive) {
5067   if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
5068     _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
5069     FLAG_SET_ERGO(StressSeed, _stress_seed);
5070   } else {
5071     _stress_seed = StressSeed;
5072   }
5073   if (_log != nullptr) {
5074     _log->elem("stress_test seed='%u'", _stress_seed);
5075   }
5076 }
5077 
5078 int Compile::random() {
5079   _stress_seed = os::next_random(_stress_seed);
5080   return static_cast<int>(_stress_seed);
5081 }
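// Note: the stress seed is either derived from the current time (and published
// back via FLAG_SET_ERGO) or taken directly from StressSeed; random() then
// advances it with os::next_random() on every call.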
5082 
 5083 // This method can be called an arbitrary number of times, with the current count

  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/c2/barrierSetC2.hpp"
  42 #include "jfr/jfrEvents.hpp"
  43 #include "jvm_io.h"
  44 #include "memory/allocation.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "opto/addnode.hpp"
  47 #include "opto/block.hpp"
  48 #include "opto/c2compiler.hpp"
  49 #include "opto/callGenerator.hpp"
  50 #include "opto/callnode.hpp"
  51 #include "opto/castnode.hpp"
  52 #include "opto/cfgnode.hpp"
  53 #include "opto/chaitin.hpp"
  54 #include "opto/compile.hpp"
  55 #include "opto/connode.hpp"
  56 #include "opto/convertnode.hpp"
  57 #include "opto/divnode.hpp"
  58 #include "opto/escape.hpp"
  59 #include "opto/idealGraphPrinter.hpp"
  60 #include "opto/inlinetypenode.hpp"
  61 #include "opto/locknode.hpp"
  62 #include "opto/loopnode.hpp"
  63 #include "opto/machnode.hpp"
  64 #include "opto/macro.hpp"
  65 #include "opto/matcher.hpp"
  66 #include "opto/mathexactnode.hpp"
  67 #include "opto/memnode.hpp"
  68 #include "opto/mulnode.hpp"
  69 #include "opto/narrowptrnode.hpp"
  70 #include "opto/node.hpp"
  71 #include "opto/opcodes.hpp"
  72 #include "opto/output.hpp"
  73 #include "opto/parse.hpp"
  74 #include "opto/phaseX.hpp"
  75 #include "opto/rootnode.hpp"
  76 #include "opto/runtime.hpp"
  77 #include "opto/stringopts.hpp"
  78 #include "opto/type.hpp"
  79 #include "opto/vector.hpp"
  80 #include "opto/vectornode.hpp"

 386   // as dead to be conservative about the dead node count at any
 387   // given time.
 388   if (!dead->is_Con()) {
 389     record_dead_node(dead->_idx);
 390   }
 391   if (dead->is_macro()) {
 392     remove_macro_node(dead);
 393   }
 394   if (dead->is_expensive()) {
 395     remove_expensive_node(dead);
 396   }
 397   if (dead->Opcode() == Op_Opaque4) {
 398     remove_template_assertion_predicate_opaq(dead);
 399   }
 400   if (dead->is_ParsePredicate()) {
 401     remove_parse_predicate(dead->as_ParsePredicate());
 402   }
 403   if (dead->for_post_loop_opts_igvn()) {
 404     remove_from_post_loop_opts_igvn(dead);
 405   }
 406   if (dead->is_InlineType()) {
 407     remove_inline_type(dead);
 408   }
 409   if (dead->is_Call()) {
 410     remove_useless_late_inlines(                &_late_inlines, dead);
 411     remove_useless_late_inlines(         &_string_late_inlines, dead);
 412     remove_useless_late_inlines(         &_boxing_late_inlines, dead);
 413     remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
 414 
 415     if (dead->is_CallStaticJava()) {
 416       remove_unstable_if_trap(dead->as_CallStaticJava(), false);
 417     }
 418   }
 419   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 420   bs->unregister_potential_barrier_node(dead);
 421 }
 422 
 423 // Disconnect all useless nodes by disconnecting those at the boundary.
 424 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist) {
 425   uint next = 0;
 426   while (next < useful.size()) {
 427     Node *n = useful.at(next++);
 428     if (n->is_SafePoint()) {

 430       // beyond that point.
 431       n->as_SafePoint()->delete_replaced_nodes();
 432     }
 433     // Use raw traversal of out edges since this code removes out edges
 434     int max = n->outcnt();
 435     for (int j = 0; j < max; ++j) {
 436       Node* child = n->raw_out(j);
 437       if (!useful.member(child)) {
 438         assert(!child->is_top() || child != top(),
 439                "If top is cached in Compile object it is in useful list");
 440         // Only need to remove this out-edge to the useless node
 441         n->raw_del_out(j);
 442         --j;
 443         --max;
 444       }
 445     }
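         // A node left with a single special user may now be simplifiable; revisit that user.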
 446     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 447       assert(useful.member(n->unique_out()), "do not push a useless node");
 448       worklist.push(n->unique_out());
 449     }
 450     if (n->outcnt() == 0) {
 451       worklist.push(n);
 452     }
 453   }
 454 
 455   remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
 456   remove_useless_nodes(_parse_predicates,   useful); // remove useless Parse Predicate nodes
 457   remove_useless_nodes(_template_assertion_predicate_opaqs, useful); // remove useless Assertion Predicate opaque nodes
 458   remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
 459   remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
 460   remove_useless_nodes(_inline_type_nodes,  useful); // remove useless inline type nodes
 461 #ifdef ASSERT
 462   if (_modified_nodes != nullptr) {
 463     _modified_nodes->remove_useless_nodes(useful.member_set());
 464   }
 465 #endif
 466   remove_useless_unstable_if_traps(useful);          // remove useless unstable_if traps
 467   remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
 468 #ifdef ASSERT
 469   if (_modified_nodes != nullptr) {
 470     _modified_nodes->remove_useless_nodes(useful.member_set());
 471   }
 472 #endif
 473 
 474   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 475   bs->eliminate_useless_gc_barriers(useful, this);
 476   // clean up the late inline lists
 477   remove_useless_late_inlines(                &_late_inlines, useful);
 478   remove_useless_late_inlines(         &_string_late_inlines, useful);
 479   remove_useless_late_inlines(         &_boxing_late_inlines, useful);
 480   remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
 481   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 482 }
 483 
 484 // ============================================================================
 485 //------------------------------CompileWrapper---------------------------------

 625 
 626 
 627 Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
 628                   Options options, DirectiveSet* directive)
 629                 : Phase(Compiler),
 630                   _compile_id(ci_env->compile_id()),
 631                   _options(options),
 632                   _method(target),
 633                   _entry_bci(osr_bci),
 634                   _ilt(nullptr),
 635                   _stub_function(nullptr),
 636                   _stub_name(nullptr),
 637                   _stub_entry_point(nullptr),
 638                   _max_node_limit(MaxNodeLimit),
 639                   _post_loop_opts_phase(false),
 640                   _allow_macro_nodes(true),
 641                   _inlining_progress(false),
 642                   _inlining_incrementally(false),
 643                   _do_cleanup(false),
 644                   _has_reserved_stack_access(target->has_reserved_stack_access()),
 645                   _has_circular_inline_type(false),
 646 #ifndef PRODUCT
 647                   _igv_idx(0),
 648                   _trace_opto_output(directive->TraceOptoOutputOption),
 649 #endif
 650                   _has_method_handle_invokes(false),
 651                   _clinit_barrier_on_entry(false),
 652                   _stress_seed(0),
 653                   _comp_arena(mtCompiler),
 654                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 655                   _env(ci_env),
 656                   _directive(directive),
 657                   _log(ci_env->log()),
 658                   _first_failure_details(nullptr),
 659                   _intrinsics        (comp_arena(), 0, 0, nullptr),
 660                   _macro_nodes       (comp_arena(), 8, 0, nullptr),
 661                   _parse_predicates  (comp_arena(), 8, 0, nullptr),
 662                   _template_assertion_predicate_opaqs (comp_arena(), 8, 0, nullptr),
 663                   _expensive_nodes   (comp_arena(), 8, 0, nullptr),
 664                   _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 665                   _inline_type_nodes (comp_arena(), 8, 0, nullptr),
 666                   _unstable_if_traps (comp_arena(), 8, 0, nullptr),
 667                   _coarsened_locks   (comp_arena(), 8, 0, nullptr),
 668                   _congraph(nullptr),
 669                   NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 670                   _unique(0),
 671                   _dead_node_count(0),
 672                   _dead_node_list(comp_arena()),
 673                   _node_arena_one(mtCompiler, Arena::Tag::tag_node),
 674                   _node_arena_two(mtCompiler, Arena::Tag::tag_node),
 675                   _node_arena(&_node_arena_one),
 676                   _mach_constant_base_node(nullptr),
 677                   _Compile_types(mtCompiler),
 678                   _initial_gvn(nullptr),
 679                   _igvn_worklist(nullptr),
 680                   _types(nullptr),
 681                   _node_hash(nullptr),
 682                   _late_inlines(comp_arena(), 2, 0, nullptr),
 683                   _string_late_inlines(comp_arena(), 2, 0, nullptr),
 684                   _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
 685                   _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr),

 748 
 749   // GVN that will be run immediately on new nodes
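       // Size the node hash table from the bytecode size, with MINIMUM_NODE_HASH as a floor.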
 750   uint estimated_size = method()->code_size()*4+64;
 751   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 752   _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
 753   _types = new (comp_arena()) Type_Array(comp_arena());
 754   _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
 755   PhaseGVN gvn;
 756   set_initial_gvn(&gvn);
 757 
 758   print_inlining_init();
 759   { // Scope for timing the parser
 760     TracePhase tp("parse", &timers[_t_parser]);
 761 
 762     // Put top into the hash table ASAP.
 763     initial_gvn()->transform(top());
 764 
 765     // Set up tf(), start(), and find a CallGenerator.
 766     CallGenerator* cg = nullptr;
 767     if (is_osr_compilation()) {
 768       init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
 769       StartNode* s = new StartOSRNode(root(), tf()->domain_sig());


 770       initial_gvn()->set_type_bottom(s);
 771       init_start(s);
 772       cg = CallGenerator::for_osr(method(), entry_bci());
 773     } else {
 774       // Normal case.
 775       init_tf(TypeFunc::make(method()));
 776       StartNode* s = new StartNode(root(), tf()->domain_cc());
 777       initial_gvn()->set_type_bottom(s);
 778       init_start(s);
 779       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 780         // With java.lang.ref.Reference.get() we must go through the
 781         // intrinsic - even when get() is the root
 782         // method of the compile - so that, if necessary, the value in
 783         // the referent field of the reference object gets recorded by
 784         // the pre-barrier code.
 785         cg = find_intrinsic(method(), false);
 786       }
 787       if (cg == nullptr) {
 788         float past_uses = method()->interpreter_invocation_count();
 789         float expected_uses = past_uses;
 790         cg = CallGenerator::for_inline(method(), expected_uses);
 791       }
 792     }
 793     if (failing())  return;
 794     if (cg == nullptr) {
 795       const char* reason = InlineTree::check_can_parse(method());
 796       assert(reason != nullptr, "expect reason for parse failure");

 872     print_ideal_ir("print_ideal");
 873   }
 874 #endif
 875 
 876 #ifdef ASSERT
 877   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 878   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 879 #endif
 880 
 881   // Dump compilation data to replay it.
 882   if (directive->DumpReplayOption) {
 883     env()->dump_replay_data(_compile_id);
 884   }
 885   if (directive->DumpInlineOption && (ilt() != nullptr)) {
 886     env()->dump_inline_data(_compile_id);
 887   }
 888 
 889   // Now that we know the size of all the monitors we can add a fixed slot
 890   // for the original deopt pc.
 891   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
 892   if (needs_stack_repair()) {
 893     // One extra slot for the special stack increment value
 894     next_slot += 2;
 895   }
 896   // TODO 8284443 Only reserve extra slot if needed
 897   if (InlineTypeReturnedAsFields) {
 898     // One extra slot to hold the IsInit information for a nullable
 899     // inline type return if we run out of registers.
 900     next_slot += 2;
 901   }
 902   set_fixed_slots(next_slot);
 903 
 904   // Compute when to use implicit null checks. Used by matching trap based
 905   // nodes and NullCheck optimization.
 906   set_allowed_deopt_reasons();
 907 
 908   // Now generate code
 909   Code_Gen();
 910 }
 911 
 912 //------------------------------Compile----------------------------------------
 913 // Compile a runtime stub
 914 Compile::Compile( ciEnv* ci_env,
 915                   TypeFunc_generator generator,
 916                   address stub_function,
 917                   const char *stub_name,
 918                   int is_fancy_jump,
 919                   bool pass_tls,
 920                   bool return_pc,
 921                   DirectiveSet* directive)
 922   : Phase(Compiler),
 923     _compile_id(0),
 924     _options(Options::for_runtime_stub()),
 925     _method(nullptr),
 926     _entry_bci(InvocationEntryBci),
 927     _stub_function(stub_function),
 928     _stub_name(stub_name),
 929     _stub_entry_point(nullptr),
 930     _max_node_limit(MaxNodeLimit),
 931     _post_loop_opts_phase(false),
 932     _allow_macro_nodes(true),
 933     _inlining_progress(false),
 934     _inlining_incrementally(false),
 935     _has_reserved_stack_access(false),
 936     _has_circular_inline_type(false),
 937 #ifndef PRODUCT
 938     _igv_idx(0),
 939     _trace_opto_output(directive->TraceOptoOutputOption),
 940 #endif
 941     _has_method_handle_invokes(false),
 942     _clinit_barrier_on_entry(false),
 943     _stress_seed(0),
 944     _comp_arena(mtCompiler),
 945     _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 946     _env(ci_env),
 947     _directive(directive),
 948     _log(ci_env->log()),
 949     _first_failure_details(nullptr),
 950     _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 951     _congraph(nullptr),
 952     NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 953     _unique(0),
 954     _dead_node_count(0),
 955     _dead_node_list(comp_arena()),
 956     _node_arena_one(mtCompiler),

1062 
1063   _fixed_slots = 0;
1064   set_has_split_ifs(false);
1065   set_has_loops(false); // first approximation
1066   set_has_stringbuilder(false);
1067   set_has_boxed_value(false);
1068   _trap_can_recompile = false;  // no traps emitted yet
1069   _major_progress = true; // start out assuming good things will happen
1070   set_has_unsafe_access(false);
1071   set_max_vector_size(0);
1072   set_clear_upper_avx(false);  // false by default for clearing upper bits of ymm registers
1073   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1074   set_decompile_count(0);
1075 
1076 #ifndef PRODUCT
1077   Copy::zero_to_bytes(_igv_phase_iter, sizeof(_igv_phase_iter));
1078 #endif
1079 
1080   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1081   _loop_opts_cnt = LoopOptsCount;
1082   _has_flat_accesses = false;
1083   _flat_accesses_share_alias = true;
1084   _scalarize_in_safepoints = false;
1085 
1086   set_do_inlining(Inline);
1087   set_max_inline_size(MaxInlineSize);
1088   set_freq_inline_size(FreqInlineSize);
1089   set_do_scheduling(OptoScheduling);
1090 
1091   set_do_vector_loop(false);
1092   set_has_monitors(false);
1093 
1094   if (AllowVectorizeOnDemand) {
1095     if (has_method() && _directive->VectorizeOption) {
1096       set_do_vector_loop(true);
1097       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1098     } else if (has_method() && method()->name() != 0 &&
1099                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1100       set_do_vector_loop(true);
1101     }
1102   }
1103   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider having do_vector_loop() mandate use_cmove unconditionally
1104   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1105 

1354   // If this method has already thrown a range-check,
1355   // assume it was because we already tried range smearing
1356   // and it failed.
1357   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1358   return !already_trapped;
1359 }
1360 
1361 
1362 //------------------------------flatten_alias_type-----------------------------
1363 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1364   assert(do_aliasing(), "Aliasing should be enabled");
1365   int offset = tj->offset();
1366   TypePtr::PTR ptr = tj->ptr();
1367 
1368   // Known instance (scalarizable allocation) alias only with itself.
1369   bool is_known_inst = tj->isa_oopptr() != nullptr &&
1370                        tj->is_oopptr()->is_known_instance();
1371 
1372   // Process weird unsafe references.
1373   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1374     bool default_value_load = EnableValhalla && tj->is_instptr()->instance_klass() == ciEnv::current()->Class_klass();
1375     assert(InlineUnsafeOps || StressReflectiveCode || default_value_load, "indeterminate pointers come only from unsafe ops");
1376     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1377     tj = TypeOopPtr::BOTTOM;
1378     ptr = tj->ptr();
1379     offset = tj->offset();
1380   }
1381 
1382   // Array pointers need some flattening
1383   const TypeAryPtr* ta = tj->isa_aryptr();
1384   if (ta && ta->is_stable()) {
1385     // Erase stability property for alias analysis.
1386     tj = ta = ta->cast_to_stable(false);
1387   }
1388   if (ta && ta->is_not_flat()) {
1389     // Erase not flat property for alias analysis.
1390     tj = ta = ta->cast_to_not_flat(false);
1391   }
1392   if (ta && ta->is_not_null_free()) {
1393     // Erase not null free property for alias analysis.
1394     tj = ta = ta->cast_to_not_null_free(false);
1395   }
1396 
1397   if( ta && is_known_inst ) {
1398     if ( offset != Type::OffsetBot &&
1399          offset > arrayOopDesc::length_offset_in_bytes() ) {
1400       offset = Type::OffsetBot; // Flatten constant access into array body only
1401       tj = ta = ta->
1402               remove_speculative()->
1403               cast_to_ptr_type(ptr)->
1404               with_offset(offset);
1405     }
1406   } else if (ta) {
1407     // For arrays indexed by constant indices, we flatten the alias
1408     // space to include all of the array body.  Only the header, klass
1409     // and array length can be accessed un-aliased.
1410     // For flat inline type arrays, each field has its own slice, so
1411     // we must include the field offset.
1412     if( offset != Type::OffsetBot ) {
1413       if( ta->const_oop() ) { // MethodData* or Method*
1414         offset = Type::OffsetBot;   // Flatten constant access into array body
1415         tj = ta = ta->
1416                 remove_speculative()->
1417                 cast_to_ptr_type(ptr)->
1418                 cast_to_exactness(false)->
1419                 with_offset(offset);
1420       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1421         // range is OK as-is.
1422         tj = ta = TypeAryPtr::RANGE;
1423       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1424         tj = TypeInstPtr::KLASS; // all klass loads look alike
1425         ta = TypeAryPtr::RANGE; // generic ignored junk
1426         ptr = TypePtr::BotPTR;
1427       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1428         tj = TypeInstPtr::MARK;
1429         ta = TypeAryPtr::RANGE; // generic ignored junk
1430         ptr = TypePtr::BotPTR;
1431       } else {                  // Random constant offset into array body
1432         offset = Type::OffsetBot;   // Flatten constant access into array body
1433         tj = ta = ta->
1434                 remove_speculative()->
1435                 cast_to_ptr_type(ptr)->
1436                 cast_to_exactness(false)->
1437                 with_offset(offset);
1438       }
1439     }
1440     // Arrays of fixed size alias with arrays of unknown size.
1441     if (ta->size() != TypeInt::POS) {
1442       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1443       tj = ta = ta->
1444               remove_speculative()->
1445               cast_to_ptr_type(ptr)->
1446               with_ary(tary)->
1447               cast_to_exactness(false);
1448     }
1449     // Arrays of known objects become arrays of unknown objects.
1450     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1451       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1452       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), ta->field_offset());
1453     }
1454     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1455       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1456       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), ta->field_offset());
1457     }
1458     // Initially all flattened array accesses share a single slice
1459     if (ta->is_flat() && ta->elem() != TypeInstPtr::BOTTOM && _flat_accesses_share_alias) {
1460       const TypeAry* tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size(), /* stable= */ false, /* flat= */ true);
1461       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
1462     }
1463     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1464     // cannot be distinguished by bytecode alone.
1465     if (ta->elem() == TypeInt::BOOL) {
1466       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1467       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1468       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
1469     }
1470     // During the 2nd round of IterGVN, NotNull castings are removed.
1471     // Make sure the Bottom and NotNull variants alias the same.
1472     // Also, make sure exact and non-exact variants alias the same.
1473     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) {
1474       tj = ta = ta->
1475               remove_speculative()->
1476               cast_to_ptr_type(TypePtr::BotPTR)->
1477               cast_to_exactness(false)->
1478               with_offset(offset);
1479     }
1480   }
1481 
1482   // Oop pointers need some flattening
1483   const TypeInstPtr *to = tj->isa_instptr();
1484   if (to && to != TypeOopPtr::BOTTOM) {
1485     ciInstanceKlass* ik = to->instance_klass();
1486     if( ptr == TypePtr::Constant ) {
1487       if (ik != ciEnv::current()->Class_klass() ||
1488           offset < ik->layout_helper_size_in_bytes()) {

1498     } else if( is_known_inst ) {
1499       tj = to; // Keep NotNull and klass_is_exact for instance type
1500     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1501       // During the 2nd round of IterGVN, NotNull castings are removed.
1502       // Make sure the Bottom and NotNull variants alias the same.
1503       // Also, make sure exact and non-exact variants alias the same.
1504       tj = to = to->
1505               remove_speculative()->
1506               cast_to_instance_id(TypeOopPtr::InstanceBot)->
1507               cast_to_ptr_type(TypePtr::BotPTR)->
1508               cast_to_exactness(false);
1509     }
1510     if (to->speculative() != nullptr) {
1511       tj = to = to->remove_speculative();
1512     }
1513     // Canonicalize the holder of this field
1514     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1515       // First handle header references such as a LoadKlassNode, even if the
1516       // object's klass is unloaded at compile time (4965979).
1517       if (!is_known_inst) { // Do it only for non-instance types
1518         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, Type::Offset(offset));
1519       }
1520     } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1521       // Static fields are in the space above the normal instance
1522       // fields in the java.lang.Class instance.
1523       if (ik != ciEnv::current()->Class_klass()) {
1524         to = nullptr;
1525         tj = TypeOopPtr::BOTTOM;
1526         offset = tj->offset();
1527       }
1528     } else {
1529       ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1530       assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1531       if (!ik->equals(canonical_holder) || tj->offset() != offset) {
1532         if( is_known_inst ) {
1533           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, nullptr, Type::Offset(offset), to->instance_id());
1534         } else {
1535           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, nullptr, Type::Offset(offset));
1536         }
1537       }
1538     }
1539   }
1540 
1541   // Klass pointers to object array klasses need some flattening
1542   const TypeKlassPtr *tk = tj->isa_klassptr();
1543   if( tk ) {
1544     // If we are referencing a field within a Klass, we need
1545     // to assume the worst case of an Object.  Both exact and
1546     // inexact types must flatten to the same alias class so
1547     // use NotNull as the PTR.
1548     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1549       tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1550                                        env()->Object_klass(),
1551                                        Type::Offset(offset));
1552     }
1553 
1554     if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1555       ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1556       if (!k || !k->is_loaded()) {                  // Only fails for some -Xcomp runs
1557         tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), Type::Offset(offset));
1558       } else {
1559         tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, Type::Offset(offset), tk->is_not_flat(), tk->is_not_null_free(), tk->is_null_free());
1560       }
1561     }

1562     // Check for precise loads from the primary supertype array and force them
1563     // to the supertype cache alias index.  Check for generic array loads from
1564     // the primary supertype array and also force them to the supertype cache
1565     // alias index.  Since the same load can reach both, we need to merge
1566     // these 2 disparate memories into the same alias class.  Since the
1567     // primary supertype array is read-only, there's no chance of confusion
1568     // where we bypass an array load and an array store.
1569     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1570     if (offset == Type::OffsetBot ||
1571         (offset >= primary_supers_offset &&
1572          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1573         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1574       offset = in_bytes(Klass::secondary_super_cache_offset());
1575       tj = tk = tk->with_offset(offset);
1576     }
1577   }
1578 
1579   // Flatten all Raw pointers together.
1580   if (tj->base() == Type::RawPtr)
1581     tj = TypeRawPtr::BOTTOM;

1671   intptr_t key = (intptr_t) adr_type;
1672   key ^= key >> logAliasCacheSize;
1673   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1674 }
1675 
1676 
1677 //-----------------------------grow_alias_types--------------------------------
1678 void Compile::grow_alias_types() {
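       // Double the capacity of the alias type table and zero-fill the new entries.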
1679   const int old_ats  = _max_alias_types; // how many before?
1680   const int new_ats  = old_ats;          // how many more?
1681   const int grow_ats = old_ats+new_ats;  // how many now?
1682   _max_alias_types = grow_ats;
1683   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1684   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1685   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1686   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1687 }
1688 
1689 
1690 //--------------------------------find_alias_type------------------------------
1691 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1692   if (!do_aliasing()) {
1693     return alias_type(AliasIdxBot);
1694   }
1695 
1696   AliasCacheEntry* ace = nullptr;
1697   if (!uncached) {
1698     ace = probe_alias_cache(adr_type);
1699     if (ace->_adr_type == adr_type) {
1700       return alias_type(ace->_index);
1701     }
1702   }
1703 
1704   // Handle special cases.
1705   if (adr_type == nullptr)          return alias_type(AliasIdxTop);
1706   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1707 
1708   // Do it the slow way.
1709   const TypePtr* flat = flatten_alias_type(adr_type);
1710 
1711 #ifdef ASSERT
1712   {
1713     ResourceMark rm;
1714     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1715            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1716     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1717            Type::str(adr_type));
1718     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1719       const TypeOopPtr* foop = flat->is_oopptr();
1720       // Scalarizable allocations always have an exact klass.
1721       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1731     if (alias_type(i)->adr_type() == flat) {
1732       idx = i;
1733       break;
1734     }
1735   }
1736 
1737   if (idx == AliasIdxTop) {
1738     if (no_create)  return nullptr;
1739     // Grow the array if necessary.
1740     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1741     // Add a new alias type.
1742     idx = _num_alias_types++;
1743     _alias_types[idx]->Init(idx, flat);
1744     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1745     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1746     if (flat->isa_instptr()) {
1747       if (flat->offset() == java_lang_Class::klass_offset()
1748           && flat->is_instptr()->instance_klass() == env()->Class_klass())
1749         alias_type(idx)->set_rewritable(false);
1750     }
1751     ciField* field = nullptr;
1752     if (flat->isa_aryptr()) {
1753 #ifdef ASSERT
1754       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1755       // (T_BYTE has the weakest alignment and size restrictions...)
1756       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1757 #endif
1758       const Type* elemtype = flat->is_aryptr()->elem();
1759       if (flat->offset() == TypePtr::OffsetBot) {
1760         alias_type(idx)->set_element(elemtype);
1761       }
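           // For a flat array slice, fold the in-element field offset into the inline klass
           // layout to locate the corresponding ciField.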
1762       int field_offset = flat->is_aryptr()->field_offset().get();
1763       if (flat->is_flat() &&
1764           field_offset != Type::OffsetBot) {
1765         ciInlineKlass* vk = elemtype->inline_klass();
1766         field_offset += vk->first_field_offset();
1767         field = vk->get_field_by_offset(field_offset, false);
1768       }
1769     }
1770     if (flat->isa_klassptr()) {
1771       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1772         alias_type(idx)->set_rewritable(false);
1773       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1774         alias_type(idx)->set_rewritable(false);
1775       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1776         alias_type(idx)->set_rewritable(false);
1777       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1778         alias_type(idx)->set_rewritable(false);
1779       if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1780         alias_type(idx)->set_rewritable(false);
1781       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1782         alias_type(idx)->set_rewritable(false);
1783     }
1784     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1785     // but the base pointer type is not distinctive enough to identify
1786     // references into JavaThread.)
1787 
1788     // Check for final fields.
1789     const TypeInstPtr* tinst = flat->isa_instptr();
1790     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {

1791       if (tinst->const_oop() != nullptr &&
1792           tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1793           tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1794         // static field
1795         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1796         field = k->get_field_by_offset(tinst->offset(), true);
1797       } else if (tinst->is_inlinetypeptr()) {
1798         // Inline type field
1799         ciInlineKlass* vk = tinst->inline_klass();
1800         field = vk->get_field_by_offset(tinst->offset(), false);
1801       } else {
1802         ciInstanceKlass *k = tinst->instance_klass();
1803         field = k->get_field_by_offset(tinst->offset(), false);
1804       }
1805     }
1806     assert(field == nullptr ||
1807            original_field == nullptr ||
1808            (field->holder() == original_field->holder() &&
1809             field->offset_in_bytes() == original_field->offset_in_bytes() &&
1810             field->is_static() == original_field->is_static()), "wrong field?");
1811     // Set field() and is_rewritable() attributes.
1812     if (field != nullptr) {
1813       alias_type(idx)->set_field(field);
1814       if (flat->isa_aryptr()) {
1815         // Fields of flat arrays are rewritable although they are declared final
1816         assert(flat->is_flat(), "must be a flat array");
1817         alias_type(idx)->set_rewritable(true);
1818       }
1819     }
1820   }
1821 
1822   // Fill the cache for next time.
1823   if (!uncached) {
1824     ace->_adr_type = adr_type;
1825     ace->_index    = idx;
1826     assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
1827 
1828     // Might as well try to fill the cache for the flattened version, too.
1829     AliasCacheEntry* face = probe_alias_cache(flat);
1830     if (face->_adr_type == nullptr) {
1831       face->_adr_type = flat;
1832       face->_index    = idx;
1833       assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1834     }
1835   }
1836 
1837   return alias_type(idx);
1838 }
1839 
1840 
1841 Compile::AliasType* Compile::alias_type(ciField* field) {
1842   const TypeOopPtr* t;
1843   if (field->is_static())
1844     t = TypeInstPtr::make(field->holder()->java_mirror());
1845   else
1846     t = TypeOopPtr::make_from_klass_raw(field->holder());
1847   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1848   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1849   return atp;
1850 }
1851 
1852 
1853 //------------------------------have_alias_type--------------------------------
1854 bool Compile::have_alias_type(const TypePtr* adr_type) {

1934   assert(!C->major_progress(), "not cleared");
1935 
1936   if (_for_post_loop_igvn.length() > 0) {
1937     while (_for_post_loop_igvn.length() > 0) {
1938       Node* n = _for_post_loop_igvn.pop();
1939       n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1940       igvn._worklist.push(n);
1941     }
1942     igvn.optimize();
1943     if (failing()) return;
1944     assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1945     assert(C->parse_predicate_count() == 0, "all parse predicates should have been removed now");
1946 
1947     // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1948     if (C->major_progress()) {
1949       C->clear_major_progress(); // ensure that major progress is now clear
1950     }
1951   }
1952 }
1953 
1954 void Compile::add_inline_type(Node* n) {
1955   assert(n->is_InlineType(), "unexpected node");
1956   _inline_type_nodes.push(n);
1957 }
1958 
1959 void Compile::remove_inline_type(Node* n) {
1960   assert(n->is_InlineType(), "unexpected node");
1961   if (_inline_type_nodes.contains(n)) {
1962     _inline_type_nodes.remove(n);
1963   }
1964 }
1965 
1966 // Does the return value keep otherwise useless inline type allocations alive?
1967 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1968   ResourceMark rm;
1969   Unique_Node_List wq;
1970   wq.push(ret_val);
1971   bool some_allocations = false;
1972   for (uint i = 0; i < wq.size(); i++) {
1973     Node* n = wq.at(i);
1974     if (n->outcnt() > 1) {
1975       // Some other use for the allocation
1976       return false;
1977     } else if (n->is_InlineType()) {
1978       wq.push(n->in(1));
1979     } else if (n->is_Phi()) {
1980       for (uint j = 1; j < n->req(); j++) {
1981         wq.push(n->in(j));
1982       }
1983     } else if (n->is_CheckCastPP() &&
1984                n->in(1)->is_Proj() &&
1985                n->in(1)->in(0)->is_Allocate()) {
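           // ret_val is (a cast of) the direct result of an allocation with no other uses.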
1986       some_allocations = true;
1987     } else if (n->is_CheckCastPP()) {
1988       wq.push(n->in(1));
1989     }
1990   }
1991   return some_allocations;
1992 }
1993 
1994 void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
1995   // Make sure that the return value does not keep an otherwise unused allocation alive
1996   if (tf()->returns_inline_type_as_fields()) {
1997     Node* ret = nullptr;
1998     for (uint i = 1; i < root()->req(); i++) {
1999       Node* in = root()->in(i);
2000       if (in->Opcode() == Op_Return) {
2001         assert(ret == nullptr, "only one return");
2002         ret = in;
2003       }
2004     }
2005     if (ret != nullptr) {
2006       Node* ret_val = ret->in(TypeFunc::Parms);
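           // If the returned oop is the only thing keeping an allocation alive, return the
           // tagged klass pointer instead so the allocation can be eliminated.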
2007       if (igvn.type(ret_val)->isa_oopptr() &&
2008           return_val_keeps_allocations_alive(ret_val)) {
2009         igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
2010         assert(ret_val->outcnt() == 0, "should be dead now");
2011         igvn.remove_dead_node(ret_val);
2012       }
2013     }
2014   }
2015   if (_inline_type_nodes.length() == 0) {
2016     return;
2017   }
2018   // Scalarize inline types in safepoint debug info.
2019   // Delay this until all inlining is over to avoid getting inconsistent debug info.
2020   set_scalarize_in_safepoints(true);
2021   for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
2022     InlineTypeNode* vt = _inline_type_nodes.at(i)->as_InlineType();
2023     vt->make_scalar_in_safepoints(&igvn);
2024     igvn.record_for_igvn(vt);
2025   }
2026   if (remove) {
2027     // Remove inline type nodes by replacing them with their oop input
2028     while (_inline_type_nodes.length() > 0) {
2029       InlineTypeNode* vt = _inline_type_nodes.pop()->as_InlineType();
2030       if (vt->outcnt() == 0) {
2031         igvn.remove_dead_node(vt);
2032         continue;
2033       }
2034       for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
2035         DEBUG_ONLY(bool must_be_buffered = false);
2036         Node* u = vt->out(i);
2037         // Check if any users are blackholes. If so, rewrite them to use either the
2038         // allocated buffer, or individual components, instead of the inline type node
2039         // that goes away.
2040         if (u->is_Blackhole()) {
2041           BlackholeNode* bh = u->as_Blackhole();
2042 
2043           // Unlink the old input
2044           int idx = bh->find_edge(vt);
2045           assert(idx != -1, "The edge should be there");
2046           bh->del_req(idx);
2047           --i;
2048 
2049           if (vt->is_allocated(&igvn)) {
2050             // Already has the allocated instance, blackhole that
2051             bh->add_req(vt->get_oop());
2052           } else {
2053             // Not allocated yet, blackhole the components
2054             for (uint c = 0; c < vt->field_count(); c++) {
2055               bh->add_req(vt->field_value(c));
2056             }
2057           }
2058 
2059           // Node modified, record for IGVN
2060           igvn.record_for_igvn(bh);
2061         }
2062 #ifdef ASSERT
2063         // Verify that inline type is buffered when replacing by oop
2064         else if (u->is_InlineType()) {
2065           // InlineType uses don't need buffering because they are about to be replaced as well
2066         } else if (u->is_Phi()) {
2067           // TODO 8302217 Remove this once InlineTypeNodes are reliably pushed through
2068         } else {
2069           must_be_buffered = true;
2070         }
2071         if (must_be_buffered && !vt->is_allocated(&igvn)) {
2072           vt->dump(0);
2073           u->dump(0);
2074           assert(false, "Should have been buffered");
2075         }
2076 #endif
2077       }
2078       igvn.replace_node(vt, vt->get_oop());
2079     }
2080   }
2081   igvn.optimize();
2082 }
2083 
2084 void Compile::adjust_flat_array_access_aliases(PhaseIterGVN& igvn) {
2085   if (!_has_flat_accesses) {
2086     return;
2087   }
2088   // Initially, all flat array accesses share the same slice to
2089   // keep dependencies with Object[] array accesses (that could be
2090   // to a flat array) correct. We're done with parsing so we
2091   // now know all flat array accesses in this compile
2092   // unit. Let's move flat array accesses to their own slice,
2093   // one per element field. This should help memory access
2094   // optimizations.
2095   ResourceMark rm;
2096   Unique_Node_List wq;
2097   wq.push(root());
2098 
2099   Node_List mergememnodes;
2100   Node_List memnodes;
2101 
2102   // Alias index currently shared by all flat memory accesses
2103   int index = get_alias_index(TypeAryPtr::INLINES);
2104 
2105   // Find MergeMem nodes and flat array accesses
2106   for (uint i = 0; i < wq.size(); i++) {
2107     Node* n = wq.at(i);
2108     if (n->is_Mem()) {
2109       const TypePtr* adr_type = nullptr;
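           // A card mark (StoreCM) is classified by the slice of the oop store it guards.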
2110       if (n->Opcode() == Op_StoreCM) {
2111         adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
2112       } else {
2113         adr_type = get_adr_type(get_alias_index(n->adr_type()));
2114       }
2115       if (adr_type == TypeAryPtr::INLINES) {
2116         memnodes.push(n);
2117       }
2118     } else if (n->is_MergeMem()) {
2119       MergeMemNode* mm = n->as_MergeMem();
2120       if (mm->memory_at(index) != mm->base_memory()) {
2121         mergememnodes.push(n);
2122       }
2123     }
2124     for (uint j = 0; j < n->req(); j++) {
2125       Node* m = n->in(j);
2126       if (m != nullptr) {
2127         wq.push(m);
2128       }
2129     }
2130   }
2131 
2132   if (memnodes.size() > 0) {
2133     _flat_accesses_share_alias = false;
2134 
2135     // We are going to change the slice for the flat array
2136     // accesses so we need to clear the cache entries that refer to
2137     // them.
2138     for (uint i = 0; i < AliasCacheSize; i++) {
2139       AliasCacheEntry* ace = &_alias_cache[i];
2140       if (ace->_adr_type != nullptr &&
2141           ace->_adr_type->is_flat()) {
2142         ace->_adr_type = nullptr;
2143         ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the nullptr adr_type resolves to AliasIdxTop
2144       }
2145     }
2146 
2147     // Find what aliases we are going to add
2148     int start_alias = num_alias_types()-1;
2149     int stop_alias = 0;
2150 
2151     for (uint i = 0; i < memnodes.size(); i++) {
2152       Node* m = memnodes.at(i);
2153       const TypePtr* adr_type = nullptr;
2154       if (m->Opcode() == Op_StoreCM) {
2155         adr_type = m->in(MemNode::OopStore)->adr_type();
2156         if (adr_type != TypeAryPtr::INLINES) {
2157           // store was optimized out and we lost track of the adr_type
2158           Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
2159                                         m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
2160                                         get_alias_index(adr_type));
2161           igvn.register_new_node_with_optimizer(clone);
2162           igvn.replace_node(m, clone);
2163         }
2164       } else {
2165         adr_type = m->adr_type();
2166 #ifdef ASSERT
2167         m->as_Mem()->set_adr_type(adr_type);
2168 #endif
2169       }
2170       int idx = get_alias_index(adr_type);
2171       start_alias = MIN2(start_alias, idx);
2172       stop_alias = MAX2(stop_alias, idx);
2173     }
2174 
2175     assert(stop_alias >= start_alias, "should have expanded aliases");
2176 
2177     Node_Stack stack(0);
2178 #ifdef ASSERT
2179     VectorSet seen(Thread::current()->resource_area());
2180 #endif
2181     // Now let's fix the memory graph so each flat array access
2182     // is moved to the right slice. Start from the MergeMem nodes.
2183     uint last = unique();
2184     for (uint i = 0; i < mergememnodes.size(); i++) {
2185       MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
2186       Node* n = current->memory_at(index);
2187       MergeMemNode* mm = nullptr;
2188       do {
2189         // Follow memory edges through memory accesses, phis and
2190         // narrow membars and push nodes on the stack. Once we hit
2191         // bottom memory, we pop elements off the stack one at a
2192         // time, in reverse order, and move them to the right slice
2193         // by changing their memory edges.
2194         if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
2195           assert(!seen.test_set(n->_idx), "");
2196           // Uses (a load for instance) will need to be moved to the
2197           // right slice as well and will get a new memory state
2198           // that we don't know yet. The use could also be the
2199           // backedge of a loop. We put a place holder node between
2200           // the memory node and its uses. We replace that place
2201           // holder with the correct memory state once we know it,
2202           // i.e. when nodes are popped off the stack. Using the
2203           // place holder makes the logic work in the presence of
2204           // loops.
2205           if (n->outcnt() > 1) {
2206             Node* place_holder = nullptr;
2207             assert(!n->has_out_with(Op_Node), "");
2208             for (DUIterator k = n->outs(); n->has_out(k); k++) {
2209               Node* u = n->out(k);
2210               if (u != current && u->_idx < last) {
2211                 bool success = false;
2212                 for (uint l = 0; l < u->req(); l++) {
2213                   if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
2214                     continue;
2215                   }
2216                   Node* in = u->in(l);
2217                   if (in == n) {
2218                     if (place_holder == nullptr) {
2219                       place_holder = new Node(1);
2220                       place_holder->init_req(0, n);
2221                     }
2222                     igvn.replace_input_of(u, l, place_holder);
2223                     success = true;
2224                   }
2225                 }
2226                 if (success) {
2227                   --k;
2228                 }
2229               }
2230             }
2231           }
2232           if (n->is_Phi()) {
2233             stack.push(n, 1);
2234             n = n->in(1);
2235           } else if (n->is_Mem()) {
2236             stack.push(n, n->req());
2237             n = n->in(MemNode::Memory);
2238           } else {
2239             assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
2240             stack.push(n, n->req());
2241             n = n->in(0)->in(TypeFunc::Memory);
2242           }
2243         } else {
2244           assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
2245           // Build a new MergeMem node to carry the new memory state
2246           // as we build it. IGVN should fold extraneous MergeMem
2247           // nodes.
2248           mm = MergeMemNode::make(n);
2249           igvn.register_new_node_with_optimizer(mm);
2250           while (stack.size() > 0) {
2251             Node* m = stack.node();
2252             uint idx = stack.index();
2253             if (m->is_Mem()) {
2254               // Move memory node to its new slice
2255               const TypePtr* adr_type = m->adr_type();
2256               int alias = get_alias_index(adr_type);
2257               Node* prev = mm->memory_at(alias);
2258               igvn.replace_input_of(m, MemNode::Memory, prev);
2259               mm->set_memory_at(alias, m);
2260             } else if (m->is_Phi()) {
2261               // We need as many new phis as there are new aliases
2262               igvn.replace_input_of(m, idx, mm);
2263               if (idx == m->req()-1) {
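                   // All inputs of this memory phi are processed: build one phi per new flat
                   // slice, plus a phi carrying base (bottom) memory.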
2264                 Node* r = m->in(0);
2265                 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2266                   const TypePtr* adr_type = get_adr_type(j);
2267                   if (!adr_type->isa_aryptr() || !adr_type->is_flat() || j == (uint)index) {
2268                     continue;
2269                   }
2270                   Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
2271                   igvn.register_new_node_with_optimizer(phi);
2272                   for (uint k = 1; k < m->req(); k++) {
2273                     phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
2274                   }
2275                   mm->set_memory_at(j, phi);
2276                 }
2277                 Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2278                 igvn.register_new_node_with_optimizer(base_phi);
2279                 for (uint k = 1; k < m->req(); k++) {
2280                   base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
2281                 }
2282                 mm->set_base_memory(base_phi);
2283               }
2284             } else {
2285               // This is a MemBarCPUOrder node from
2286               // Parse::array_load()/Parse::array_store(), in the
2287               // branch that handles flat arrays hidden under
2288               // an Object[] array. We also need one new membar per
2289               // new alias to keep the unknown access that the
2290               // membars protect properly ordered with accesses to
2291               // known flat arrays.
2292               assert(m->is_Proj(), "projection expected");
2293               Node* ctrl = m->in(0)->in(TypeFunc::Control);
2294               igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
2295               for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2296                 const TypePtr* adr_type = get_adr_type(j);
2297                 if (!adr_type->isa_aryptr() || !adr_type->is_flat() || j == (uint)index) {
2298                   continue;
2299                 }
2300                 MemBarNode* mb = new MemBarCPUOrderNode(this, j, nullptr);
2301                 igvn.register_new_node_with_optimizer(mb);
2302                 Node* mem = mm->memory_at(j);
2303                 mb->init_req(TypeFunc::Control, ctrl);
2304                 mb->init_req(TypeFunc::Memory, mem);
2305                 ctrl = new ProjNode(mb, TypeFunc::Control);
2306                 igvn.register_new_node_with_optimizer(ctrl);
2307                 mem = new ProjNode(mb, TypeFunc::Memory);
2308                 igvn.register_new_node_with_optimizer(mem);
2309                 mm->set_memory_at(j, mem);
2310               }
2311               igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
2312             }
2313             if (idx < m->req()-1) {
2314               idx += 1;
2315               stack.set_index(idx);
2316               n = m->in(idx);
2317               break;
2318             }
2319             // Take care of place holder nodes
2320             if (m->has_out_with(Op_Node)) {
2321               Node* place_holder = m->find_out_with(Op_Node);
2322               if (place_holder != nullptr) {
2323                 Node* mm_clone = mm->clone();
2324                 igvn.register_new_node_with_optimizer(mm_clone);
2325                 Node* hook = new Node(1);
2326                 hook->init_req(0, mm);
2327                 igvn.replace_node(place_holder, mm_clone);
2328                 hook->destruct(&igvn);
2329               }
2330               assert(!m->has_out_with(Op_Node), "place holder should be gone now");
2331             }
2332             stack.pop();
2333           }
2334         }
2335       } while(stack.size() > 0);
2336       // Fix the memory state at the MergeMem we started from
2337       igvn.rehash_node_delayed(current);
2338       for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2339         const TypePtr* adr_type = get_adr_type(j);
2340         if (!adr_type->isa_aryptr() || !adr_type->is_flat()) {
2341           continue;
2342         }
2343         current->set_memory_at(j, mm);
2344       }
2345       current->set_memory_at(index, current->base_memory());
2346     }
2347     igvn.optimize();
2348   }
2349   print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
2350 #ifdef ASSERT
2351   if (!_flat_accesses_share_alias) {
2352     wq.clear();
2353     wq.push(root());
2354     for (uint i = 0; i < wq.size(); i++) {
2355       Node* n = wq.at(i);
2356       assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
2357       for (uint j = 0; j < n->req(); j++) {
2358         Node* m = n->in(j);
2359         if (m != nullptr) {
2360           wq.push(m);
2361         }
2362       }
2363     }
2364   }
2365 #endif
2366 }
2367 
2368 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
2369   if (OptimizeUnstableIf) {
2370     _unstable_if_traps.append(trap);
2371   }
2372 }
2373 
2374 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
2375   for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
2376     UnstableIfTrap* trap = _unstable_if_traps.at(i);
2377     Node* n = trap->uncommon_trap();
2378     if (!useful.member(n)) {
2379       _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
2380     }
2381   }
2382 }
2383 
2384 // Remove the unstable if trap associated with 'unc' from candidates. It is either dead
2385 // or a fold-compares case. Return true on success or if the trap is not found.
2386 //
2387 // In rare cases, the found trap has been processed. It is too late to delete it. Return

2423       assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
2424       Bytecodes::Code c = iter.cur_bc();
2425       Node* lhs = nullptr;
2426       Node* rhs = nullptr;
2427       if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
2428         lhs = unc->peek_operand(0);
2429         rhs = unc->peek_operand(1);
2430       } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
2431         lhs = unc->peek_operand(0);
2432       }
2433 
2434       ResourceMark rm;
2435       const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
2436       assert(live_locals.is_valid(), "broken liveness info");
2437       int len = (int)live_locals.size();
2438 
2439       for (int i = 0; i < len; i++) {
2440         Node* local = unc->local(jvms, i);
2441         // kill local using the liveness of next_bci.
2442         // give up when the local looks like an operand to secure reexecution.
2443         if (!live_locals.at(i) && !local->is_top() && local != lhs && local != rhs) {
2444           uint idx = jvms->locoff() + i;
2445 #ifdef ASSERT
2446           if (PrintOpto && Verbose) {
2447             tty->print("[unstable_if] kill local#%d: ", idx);
2448             local->dump();
2449             tty->cr();
2450           }
2451 #endif
2452           igvn.replace_input_of(unc, idx, top());
2453           modified = true;
2454         }
2455       }
2456     }
2457 
2458     // keep the modified trap for late query
2459     if (modified) {
2460       trap->set_modified();
2461     } else {
2462       _unstable_if_traps.delete_at(i);
2463     }
2464   }
2465   igvn.optimize();
2466 }
2467 
2468 // StringOpts and late inlining of string methods
2469 void Compile::inline_string_calls(bool parse_time) {
2470   {
2471     // remove useless nodes to make the usage analysis simpler
2472     ResourceMark rm;
2473     PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
2474   }
2475 
2476   {
2477     ResourceMark rm;
2478     print_method(PHASE_BEFORE_STRINGOPTS, 3);

2633 
2634   if (_string_late_inlines.length() > 0) {
2635     assert(has_stringbuilder(), "inconsistent");
2636 
2637     inline_string_calls(false);
2638 
2639     if (failing())  return;
2640 
2641     inline_incrementally_cleanup(igvn);
2642   }
2643 
2644   set_inlining_incrementally(false);
2645 }
2646 
2647 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2648   // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2649   // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2650   // Tracking and verification of modified nodes is disabled by setting "_modified_nodes" to nullptr,
2651   // as if "inlining_incrementally() == true" were set.
2652   assert(inlining_incrementally() == false, "not allowed");
2653 #ifdef ASSERT
2654   Unique_Node_List* modified_nodes = _modified_nodes;
2655   _modified_nodes = nullptr;
2656 #endif
2657   assert(_late_inlines.length() > 0, "sanity");
2658 
2659   while (_late_inlines.length() > 0) {
2660     igvn_worklist()->ensure_empty(); // should be done with igvn
2661 
2662     while (inline_incrementally_one()) {
2663       assert(!failing(), "inconsistent");
2664     }
2665     if (failing())  return;
2666 
2667     inline_incrementally_cleanup(igvn);
2668   }
2669   DEBUG_ONLY( _modified_nodes = modified_nodes; )
2670 }
2671 
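Illustrative aside: process_late_inline_calls_no_inline() is a drain loop: it repeatedly takes work from the late-inline list and runs an IGVN cleanup round after each batch, until the list stays empty. A generic standalone sketch of that shape (all names hypothetical):

#include <deque>
#include <functional>
#include <iostream>

// Generic drain loop: process queued tasks one at a time and run a cleanup
// pass after each batch, repeating until no task re-populates the queue.
void drain_with_cleanup(std::deque<std::function<void()>>& queue,
                        const std::function<void()>& cleanup) {
  while (!queue.empty()) {
    while (!queue.empty()) {     // one "batch": keep taking work while it lasts
      std::function<void()> task = std::move(queue.front());
      queue.pop_front();
      task();                    // a task may push follow-up work onto the queue
    }
    cleanup();                   // analogous to inline_incrementally_cleanup(igvn)
  }
}

int main() {
  std::deque<std::function<void()>> q;
  q.push_back([&q] { std::cout << "task A\n"; q.push_back([] { std::cout << "task B\n"; }); });
  drain_with_cleanup(q, [] { std::cout << "cleanup round\n"; });
  return 0;
}
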
2672 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2673   if (_loop_opts_cnt > 0) {
2674     while (major_progress() && (_loop_opts_cnt > 0)) {
2675       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2676       PhaseIdealLoop::optimize(igvn, mode);
2677       _loop_opts_cnt--;
2678       if (failing())  return false;
2679       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2680     }
2681   }
2682   return true;
2683 }
2684 
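Illustrative aside: optimize_loops() is a budgeted fixed-point driver: the pass reruns while it reports major progress, but at most _loop_opts_cnt times. The same shape as a standalone sketch (the pass is represented by a hypothetical callable):

#include <functional>
#include <iostream>

// Run 'pass' until it stops making progress or the budget is exhausted.
// Returns the number of iterations actually performed.
int run_to_fixpoint(const std::function<bool()>& pass, int budget) {
  int iterations = 0;
  bool progress = true;
  while (progress && budget > 0) {
    progress = pass();   // true means "graph changed, worth another round"
    budget--;
    iterations++;
  }
  return iterations;
}

int main() {
  int work_left = 3;
  int iters = run_to_fixpoint([&work_left] { return work_left-- > 0; }, /*budget=*/10);
  std::cout << "converged after " << iters << " iterations\n";  // 4: three productive rounds plus one that finds nothing
  return 0;
}
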
2685 // Remove edges from "root" to each SafePoint at a backward branch.
2686 // They were inserted during parsing (see add_safepoint()) to make
2687 // infinite loops without calls or exceptions visible to root, i.e.,
2688 // useful.
2689 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {

2796     print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2797   }
2798   assert(!has_vbox_nodes(), "sanity");
2799 
2800   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2801     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2802     igvn_worklist()->ensure_empty(); // should be done with igvn
2803     {
2804       ResourceMark rm;
2805       PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
2806     }
2807     igvn.reset_from_gvn(initial_gvn());
2808     igvn.optimize();
2809     if (failing()) return;
2810   }
2811 
2812   // Now that all inlining is over and no PhaseRemoveUseless will run, cut the edges from root to loop
2813   // safepoints
2814   remove_root_to_sfpts_edges(igvn);
2815 
2816   // Process inline type nodes now that all inlining is over
2817   process_inline_types(igvn);
2818 
2819   adjust_flat_array_access_aliases(igvn);
2820 
2821   if (failing())  return;
2822 
2823   // Perform escape analysis
2824   if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2825     if (has_loops()) {
2826       // Cleanup graph (remove dead nodes).
2827       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2828       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2829       if (failing())  return;
2830     }
2831     bool progress;
2832     print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2833     do {
2834       ConnectionGraph::do_analysis(this, &igvn);
2835 
2836       if (failing())  return;
2837 
2838       int mcount = macro_count(); // Record number of allocations and locks before IGVN
2839 
2840       // Optimize out fields loads from scalar replaceable allocations.

2924   if (failing())  return;
2925 
2926   // Loop transforms on the ideal graph.  Range Check Elimination,
2927   // peeling, unrolling, etc.
2928   if (!optimize_loops(igvn, LoopOptsDefault)) {
2929     return;
2930   }
2931 
2932   if (failing())  return;
2933 
2934   C->clear_major_progress(); // ensure that major progress is now clear
2935 
2936   process_for_post_loop_opts_igvn(igvn);
2937 
2938   if (failing())  return;
2939 
2940 #ifdef ASSERT
2941   bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2942 #endif
2943 
2944   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2945 
2946   if (_late_inlines.length() > 0) {
2947     // More opportunities to optimize virtual and MH calls.
2948     // Though it may be too late to perform inlining, strength-reducing them to direct calls is still an option.
2949     process_late_inline_calls_no_inline(igvn);
2950   }
2951 
2952   {
2953     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2954     print_method(PHASE_BEFORE_MACRO_EXPANSION, 3);
2955     PhaseMacroExpand  mex(igvn);
2956     if (mex.expand_macro_nodes()) {
2957       assert(failing(), "must bail out w/ explicit message");
2958       return;
2959     }
2960     print_method(PHASE_AFTER_MACRO_EXPANSION, 2);
2961   }
2962 
2963   // Process inline type nodes again and remove them. From here
2964   // on we don't need to keep track of field values anymore.
2965   process_inline_types(igvn, /* remove= */ true);
2966 
2967   {
2968     TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2969     if (bs->expand_barriers(this, igvn)) {
2970       assert(failing(), "must bail out w/ explicit message");
2971       return;
2972     }
2973     print_method(PHASE_BARRIER_EXPANSION, 2);
2974   }
2975 
2976   if (C->max_vector_size() > 0) {
2977     C->optimize_logic_cones(igvn);
2978     igvn.optimize();
2979     if (failing()) return;
2980   }
2981 
2982   DEBUG_ONLY( _modified_nodes = nullptr; )
2983   DEBUG_ONLY( _late_inlines.clear(); )
2984 
2985   assert(igvn._worklist.size() == 0, "not empty");









2986  } // (End scope of igvn; run destructor if necessary for asserts.)
2987 
2988  check_no_dead_use();
2989 
2990  process_print_inlining();
2991 
2992  // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
2993  // to remove hashes to unlock nodes for modifications.
2994  C->node_hash()->clear();
2995 
2996  // A method with only infinite loops has no edges entering loops from root
2997  {
2998    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2999    if (final_graph_reshaping()) {
3000      assert(failing(), "must bail out w/ explicit message");
3001      return;
3002    }
3003  }
3004 
3005  print_method(PHASE_OPTIMIZE_FINISHED, 2);

3593             // Accumulate any precedence edges
3594             if (mem->in(i) != nullptr) {
3595               n->add_prec(mem->in(i));
3596             }
3597           }
3598           // Everything above this point has been processed.
3599           done = true;
3600         }
3601         // Eliminate the previous StoreCM
3602         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3603         assert(mem->outcnt() == 0, "should be dead");
3604         mem->disconnect_inputs(this);
3605       } else {
3606         prev = mem;
3607       }
3608       mem = prev->in(MemNode::Memory);
3609     }
3610   }
3611 }
3612 
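Illustrative aside: the StoreCM elimination above treats the raw memory inputs as a singly linked chain; a redundant store is bypassed by pointing its consumer's memory edge at the store's own memory input, after any precedence edges worth keeping have been copied over. A standalone sketch of splicing a node out of such a chain (plain structs, hypothetical fields):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical chain node: 'mem' plays the role of the MemNode::Memory input,
// 'prec' the role of precedence edges hung off the node.
struct ChainNode {
  std::string name;
  ChainNode* mem = nullptr;
  std::vector<ChainNode*> prec;
};

// Splice 'victim' out of the chain below 'user': move its precedence edges to
// 'keeper' and route 'user' around it, as the StoreCM elimination does.
void splice_out(ChainNode* user, ChainNode* victim, ChainNode* keeper) {
  for (ChainNode* p : victim->prec) {
    keeper->prec.push_back(p);   // accumulate any precedence edges
  }
  user->mem = victim->mem;       // bypass the victim in the memory chain
  victim->mem = nullptr;         // victim is now disconnected
  victim->prec.clear();
}

int main() {
  ChainNode base, redundant, latest;
  base.name = "base"; redundant.name = "redundant StoreCM"; latest.name = "latest StoreCM";
  redundant.mem = &base;
  latest.mem = &redundant;
  splice_out(&latest, &redundant, &latest);
  std::cout << latest.name << " -> " << latest.mem->name << '\n';  // latest StoreCM -> base
  return 0;
}
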
3613 
3614 //------------------------------final_graph_reshaping_impl----------------------
3615 // Implement items 1-5 from final_graph_reshaping below.
3616 void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3617 
3618   if ( n->outcnt() == 0 ) return; // dead node
3619   uint nop = n->Opcode();
3620 
3621   // Check for 2-input instruction with "last use" on right input.
3622   // Swap to left input.  Implements item (2).
3623   if( n->req() == 3 &&          // two-input instruction
3624       n->in(1)->outcnt() > 1 && // left use is NOT a last use
3625       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not a data loop
3626       n->in(2)->outcnt() == 1 &&// right use IS a last use
3627       !n->in(2)->is_Con() ) {   // right use is not a constant
3628     // Check for commutative opcode
3629     switch( nop ) {
3630     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
3631     case Op_MaxI:  case Op_MaxL:  case Op_MaxF:  case Op_MaxD:
3632     case Op_MinI:  case Op_MinL:  case Op_MinF:  case Op_MinD:
3633     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:

3746       if (n->outcnt() > 1 &&
3747           !n->is_Proj() &&
3748           nop != Op_CreateEx &&
3749           nop != Op_CheckCastPP &&
3750           nop != Op_DecodeN &&
3751           nop != Op_DecodeNKlass &&
3752           !n->is_Mem() &&
3753           !n->is_Phi()) {
3754         Node *x = n->clone();
3755         call->set_req(TypeFunc::Parms, x);
3756       }
3757     }
3758     break;
3759   }
3760 
3761   case Op_StoreCM:
3762     {
3763       // Convert OopStore dependence into precedence edge
3764       Node* prec = n->in(MemNode::OopStore);
3765       n->del_req(MemNode::OopStore);
3766       if (prec->is_MergeMem()) {
3767         MergeMemNode* mm = prec->as_MergeMem();
3768         Node* base = mm->base_memory();
3769         for (int i = AliasIdxRaw + 1; i < num_alias_types(); i++) {
3770           const TypePtr* adr_type = get_adr_type(i);
3771           if (adr_type->is_flat()) {
3772             Node* m = mm->memory_at(i);
3773             n->add_prec(m);
3774           }
3775         }
3776         if (mm->outcnt() == 0) {
3777           mm->disconnect_inputs(this);
3778         }
3779       } else {
3780         n->add_prec(prec);
3781       }
3782       eliminate_redundant_card_marks(n);
3783     }
3784 
3785     // fall through
3786 
3787   case Op_StoreB:
3788   case Op_StoreC:
3789   case Op_StoreI:
3790   case Op_StoreL:
3791   case Op_CompareAndSwapB:
3792   case Op_CompareAndSwapS:
3793   case Op_CompareAndSwapI:
3794   case Op_CompareAndSwapL:
3795   case Op_CompareAndSwapP:
3796   case Op_CompareAndSwapN:
3797   case Op_WeakCompareAndSwapB:
3798   case Op_WeakCompareAndSwapS:
3799   case Op_WeakCompareAndSwapI:
3800   case Op_WeakCompareAndSwapL:
3801   case Op_WeakCompareAndSwapP:

4382           // Replace all nodes that have the same edges as m with m
4383           k->subsume_by(m, this);
4384         }
4385       }
4386     }
4387     break;
4388   }
4389   case Op_CmpUL: {
4390     if (!Matcher::has_match_rule(Op_CmpUL)) {
4391       // No support for unsigned long comparisons
4392       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
4393       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
4394       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
4395       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
4396       Node* andl = new AndLNode(orl, remove_sign_mask);
4397       Node* cmp = new CmpLNode(andl, n->in(2));
4398       n->subsume_by(cmp, this);
4399     }
4400     break;
4401   }
4402 #ifdef ASSERT
4403   case Op_InlineType: {
4404     n->dump(-1);
4405     assert(false, "inline type node was not removed");
4406     break;
4407   }
4408 #endif
4409   default:
4410     assert(!n->is_Call(), "");
4411     assert(!n->is_Mem(), "");
4412     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
4413     break;
4414   }
4415 }
4416 
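Illustrative aside: the Op_CmpUL lowering above clamps the left operand with (x | (x >> 63)) & max_jlong, which maps every value with the sign bit set to max_jlong and leaves non-negative values unchanged. Assuming the right operand is a non-negative value strictly below max_jlong, a signed compare of the clamped value then agrees with the unsigned compare; the standalone check below exercises exactly that identity (it sketches the arithmetic only, not the C2 code paths that establish the precondition):

#include <cassert>
#include <cstdint>
#include <iostream>

// Three-way compares, signed and unsigned.
static int scmp3(int64_t a, int64_t b)   { return (a < b) ? -1 : (a > b) ? 1 : 0; }
static int ucmp3(uint64_t a, uint64_t b) { return (a < b) ? -1 : (a > b) ? 1 : 0; }

// The clamp built above from RShiftL/OrL/AndL: values with the sign bit set
// collapse to max_jlong, non-negative values pass through unchanged.
// ('>>' on a negative int64_t is an arithmetic shift on the relevant platforms.)
static int64_t clamp_to_max_jlong(int64_t x) {
  return (x | (x >> 63)) & INT64_MAX;
}

int main() {
  // Left operand: anything. Right operand: restricted to [0, max_jlong),
  // which is the assumption this identity relies on.
  const int64_t lhs[] = {0, 1, 42, INT64_MAX - 1, INT64_MAX, -1, -42, INT64_MIN};
  const int64_t rhs[] = {0, 1, 42, INT64_MAX - 1};
  for (int64_t a : lhs) {
    for (int64_t b : rhs) {
      int unsigned_result = ucmp3(static_cast<uint64_t>(a), static_cast<uint64_t>(b));
      int lowered_result  = scmp3(clamp_to_max_jlong(a), b);
      assert(unsigned_result == lowered_result);
    }
  }
  std::cout << "signed compare of the clamped value matches the unsigned compare\n";
  return 0;
}
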
4417 //------------------------------final_graph_reshaping_walk---------------------
4418 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
4419 // requires that the walk visits a node's inputs before visiting the node.
4420 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
4421   Unique_Node_List sfpt;
4422 
4423   frc._visited.set(root->_idx); // first, mark node as visited
4424   uint cnt = root->req();
4425   Node *n = root;
4426   uint  i = 0;
4427   while (true) {
4428     if (i < cnt) {

4768   }
4769 }
4770 
4771 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4772   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4773 }
4774 
4775 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4776   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4777 }
4778 
4779 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4780   if (holder->is_initialized()) {
4781     return false;
4782   }
4783   if (holder->is_being_initialized()) {
4784     if (accessing_method->holder() == holder) {
4785       // Access inside a class. The barrier can be elided when the access happens in <clinit>,
4786       // <init>, or a static method. In all those cases, an initialization barrier on the
4787       // holder klass has already been passed.
4788       if (accessing_method->is_class_initializer() ||
4789           accessing_method->is_object_constructor() ||
4790           accessing_method->is_static()) {
4791         return false;
4792       }
4793     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4794       // Access from a subclass. The barrier can be elided only when the access happens in <clinit>.
4795       // In case of <init> or a static method, a barrier on the subclass is not enough: the
4796       // child class can become fully initialized while its parent class is still being initialized.
4797       if (accessing_method->is_class_initializer()) {
4798         return false;
4799       }
4800     }
4801     ciMethod* root = method(); // the root method of compilation
4802     if (root != accessing_method) {
4803       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4804     }
4805   }
4806   return true;
4807 }
4808 
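Illustrative aside: the elision rules in needs_clinit_barrier() can be read as a small decision function over a handful of facts about the holder and the accessing method. A deliberately simplified standalone model of those rules (it omits the re-check against the compilation root; all names are hypothetical):

#include <iostream>

// Facts about one access site, from the perspective of the rules above.
struct ClinitAccess {
  bool holder_initialized;        // holder->is_initialized()
  bool holder_being_initialized;  // holder->is_being_initialized()
  bool same_class;                // accessing method declared in the holder itself
  bool subclass;                  // accessing method declared in a subclass of the holder
  bool in_clinit;                 // access happens in <clinit>
  bool in_init_or_static;         // access happens in <init> or a static method
};

// Simplified model: returns whether a class-initialization barrier is needed.
// (The real code additionally re-checks the access in the context of the
// compilation root method; that step is omitted here.)
bool needs_clinit_barrier_model(const ClinitAccess& a) {
  if (a.holder_initialized) return false;
  if (a.holder_being_initialized) {
    if (a.same_class && (a.in_clinit || a.in_init_or_static)) return false;
    if (a.subclass && a.in_clinit) return false;
  }
  return true;
}

int main() {
  // A subclass <init> touching a superclass under initialization still needs a barrier.
  ClinitAccess sub_init{false, true, false, true, false, true};
  std::cout << std::boolalpha << needs_clinit_barrier_model(sub_init) << '\n';  // true
  return 0;
}
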
4809 #ifndef PRODUCT
4810 //------------------------------verify_bidirectional_edges---------------------
4811 // For each input edge to a node (i.e., for each Use-Def edge), verify that
4812 // there is a corresponding Def-Use edge.
4813 void Compile::verify_bidirectional_edges(Unique_Node_List &visited) {
4814   // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4815   uint stack_size = live_nodes() >> 4;
4816   Node_List nstack(MAX2(stack_size, (uint)OptoNodeListSize));
4817   nstack.push(_root);

4833       if (in != nullptr && !in->is_top()) {
4834         // Count how many times `n` appears among the outputs of `in`
4835         int cnt = 0;
4836         for (uint idx = 0; idx < in->_outcnt; idx++) {
4837           if (in->_out[idx] == n) {
4838             cnt++;
4839           }
4840         }
4841         assert(cnt > 0, "Failed to find Def-Use edge.");
4842         // Check for duplicate edges:
4843         // walk the input array, down-counting the input edges to n.
4844         for (uint j = 0; j < length; j++) {
4845           if (n->in(j) == in) {
4846             cnt--;
4847           }
4848         }
4849         assert(cnt == 0, "Mismatched edge count.");
4850       } else if (in == nullptr) {
4851         assert(i == 0 || i >= n->req() ||
4852                n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4853                (n->is_Allocate() && i >= AllocateNode::InlineType) ||
4854                (n->is_Unlock() && i == (n->req() - 1)) ||
4855                (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4856               "only region, phi, arraycopy, allocate, unlock or membar nodes have null data edges");
4857       } else {
4858         assert(in->is_top(), "sanity");
4859         // Nothing to check.
4860       }
4861     }
4862   }
4863 }
4864 
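Illustrative aside: verify_bidirectional_edges() checks that the def-use arrays stay consistent with the use-def arrays, including multiplicities: if a node lists an input k times, that input must list the node k times among its outputs. A tiny sketch of the same multiset check on a toy node type (hypothetical structure):

#include <algorithm>
#include <cassert>
#include <vector>

// Toy graph node with explicit use-def (ins) and def-use (outs) arrays.
struct ToyNode {
  std::vector<ToyNode*> ins;   // inputs this node uses
  std::vector<ToyNode*> outs;  // nodes that use this node
};

// For every input edge n->in, require a matching out edge in->n,
// with equal multiplicity (duplicate edges must match up exactly).
bool edges_are_bidirectional(const ToyNode* n) {
  for (const ToyNode* in : n->ins) {
    if (in == nullptr) continue;
    const auto uses = std::count(n->ins.begin(), n->ins.end(), in);
    const auto defs = std::count(in->outs.begin(), in->outs.end(), n);
    if (defs == 0 || uses != defs) return false;
  }
  return true;
}

int main() {
  ToyNode a, b;
  b.ins = {&a, &a};        // b uses a twice ...
  a.outs = {&b, &b};       // ... so a must list b twice
  assert(edges_are_bidirectional(&b));
  a.outs.pop_back();       // drop one def-use edge: now inconsistent
  assert(!edges_are_bidirectional(&b));
  return 0;
}
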
4865 //------------------------------verify_graph_edges---------------------------
4866 // Walk the Graph and verify that there is a one-to-one correspondence
4867 // between Use-Def edges and Def-Use edges in the graph.
4868 void Compile::verify_graph_edges(bool no_dead_code) {
4869   if (VerifyGraphEdges) {
4870     Unique_Node_List visited;
4871 
4872     // Call graph walk to check edges
4873     verify_bidirectional_edges(visited);
4874     if (no_dead_code) {
4875       // Now make sure that no visited node is used by an unvisited node.
4876       bool dead_nodes = false;

4966 // (1) subklass is already limited to a subtype of superklass => always ok
4967 // (2) subklass does not overlap with superklass => always fail
4968 // (3) superklass has NO subtypes and we can check with a simple compare.
4969 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
4970   if (skip) {
4971     return SSC_full_test;       // Let caller generate the general case.
4972   }
4973 
4974   if (subk->is_java_subtype_of(superk)) {
4975     return SSC_always_true; // (0) and (1)  this test cannot fail
4976   }
4977 
4978   if (!subk->maybe_java_subtype_of(superk)) {
4979     return SSC_always_false; // (2) true path dead; no dynamic test needed
4980   }
4981 
4982   const Type* superelem = superk;
4983   if (superk->isa_aryklassptr()) {
4984     int ignored;
4985     superelem = superk->is_aryklassptr()->base_element_type(ignored);
4986 
4987     // Do not fold the subtype check to an array klass pointer comparison for nullable inline type arrays
4988     // because null-free [LMyValue <: nullable [LMyValue even though the klasses differ. Perform a full test.
4989     if (!superk->is_aryklassptr()->is_null_free() && superk->is_aryklassptr()->elem()->isa_instklassptr() &&
4990         superk->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->is_inlinetype()) {
4991       return SSC_full_test;
4992     }
4993   }
4994 
4995   if (superelem->isa_instklassptr()) {
4996     ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4997     if (!ik->has_subklass()) {
4998       if (!ik->is_final()) {
4999         // Add a dependency if there is a chance of a later subclass.
5000         dependencies()->assert_leaf_type(ik);
5001       }
5002       if (!superk->maybe_java_subtype_of(subk)) {
5003         return SSC_always_false;
5004       }
5005       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
5006     }
5007   } else {
5008     // A primitive array type has no subtypes.
5009     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
5010   }
5011 
5012   return SSC_full_test;

5572       const Type* t = igvn.type_or_null(n);
5573       assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
5574       if (n->is_Type()) {
5575         t = n->as_Type()->type();
5576         assert(t == t->remove_speculative(), "no more speculative types");
5577       }
5578       // Iterate over outs - endless loops are unreachable from below
5579       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5580         Node *m = n->fast_out(i);
5581         if (not_a_node(m)) {
5582           continue;
5583         }
5584         worklist.push(m);
5585       }
5586     }
5587     igvn.check_no_speculative_types();
5588 #endif
5589   }
5590 }
5591 
5592 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
5593   const TypeInstPtr* ta = phase->type(a)->isa_instptr();
5594   const TypeInstPtr* tb = phase->type(b)->isa_instptr();
5595   if (!EnableValhalla || ta == nullptr || tb == nullptr ||
5596       ta->is_zero_type() || tb->is_zero_type() ||
5597       !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
5598     // Use old acmp if one operand is null or not an inline type
5599     return new CmpPNode(a, b);
5600   } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
5601     // We know that one operand is an inline type. Therefore,
5602     // new acmp will only return true if both operands are nullptr.
5603     // Check if both operands are null by or'ing the oops.
5604     a = phase->transform(new CastP2XNode(nullptr, a));
5605     b = phase->transform(new CastP2XNode(nullptr, b));
5606     a = phase->transform(new OrXNode(a, b));
5607     return new CmpXNode(a, phase->MakeConX(0));
5608   }
5609   // Use new acmp
5610   return nullptr;
5611 }
5612 
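Illustrative aside: optimize_acmp() tests "both operands are null" with a single compare by or'ing the two pointer words, since a bitwise OR is zero exactly when both inputs are zero. The same trick on raw addresses, as a standalone sketch (it assumes the usual mapping of a null pointer to address zero):

#include <cassert>
#include <cstdint>

// True iff both pointers are null, computed with one compare:
// (a | b) == 0 holds exactly when a == 0 and b == 0.
static bool both_null(const void* a, const void* b) {
  std::uintptr_t wa = reinterpret_cast<std::uintptr_t>(a);  // analogous to CastP2X
  std::uintptr_t wb = reinterpret_cast<std::uintptr_t>(b);
  return (wa | wb) == 0;
}

int main() {
  int x = 0;
  assert(both_null(nullptr, nullptr));
  assert(!both_null(&x, nullptr));
  assert(!both_null(nullptr, &x));
  assert(!both_null(&x, &x));
  return 0;
}
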
5613 // Auxiliary methods to support randomized stressing/fuzzing.
5614 
5615 void Compile::initialize_stress_seed(const DirectiveSet* directive) {
5616   if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
5617     _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
5618     FLAG_SET_ERGO(StressSeed, _stress_seed);
5619   } else {
5620     _stress_seed = StressSeed;
5621   }
5622   if (_log != nullptr) {
5623     _log->elem("stress_test seed='%u'", _stress_seed);
5624   }
5625 }
5626 
5627 int Compile::random() {
5628   _stress_seed = os::next_random(_stress_seed);
5629   return static_cast<int>(_stress_seed);
5630 }
5631 
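Illustrative aside: the stress helpers above seed a pseudo-random sequence either from an explicit flag (for reproducible runs) or from a timestamp, log the chosen seed, and then advance the state deterministically on every call. A standalone sketch of that pattern using a Lehmer-style step (the MINSTD-like constants are purely illustrative and need not match os::next_random):

#include <chrono>
#include <cstdint>
#include <iostream>

// Lehmer-style PRNG step: state' = state * a mod m, with m = 2^31 - 1.
static uint32_t next_random_step(uint32_t state) {
  return static_cast<uint32_t>((static_cast<uint64_t>(state) * 48271u) % 2147483647u);
}

int main() {
  const uint32_t configured_seed = 0;  // 0 stands in for "no explicit seed configured"
  uint32_t seed = configured_seed;
  if (seed == 0) {
    // No explicit seed: derive one from the clock (non-reproducible path).
    auto now = std::chrono::steady_clock::now().time_since_epoch();
    seed = static_cast<uint32_t>(
        std::chrono::duration_cast<std::chrono::nanoseconds>(now).count());
  }
  seed = (seed % 2147483646u) + 1u;              // keep the Lehmer state in (0, m)
  std::cout << "stress seed = " << seed << '\n'; // log it so a failing run can be replayed

  uint32_t state = seed;
  for (int i = 0; i < 3; i++) {
    state = next_random_step(state);             // deterministic given the logged seed
    std::cout << state << '\n';
  }
  return 0;
}
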
5632 // This method can be called an arbitrary number of times, with current count
< prev index next >