
src/hotspot/share/opto/compile.cpp


  35 #include "compiler/disassembler.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/c2/barrierSetC2.hpp"
  39 #include "jfr/jfrEvents.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "opto/addnode.hpp"
  42 #include "opto/block.hpp"
  43 #include "opto/c2compiler.hpp"
  44 #include "opto/callGenerator.hpp"
  45 #include "opto/callnode.hpp"
  46 #include "opto/castnode.hpp"
  47 #include "opto/cfgnode.hpp"
  48 #include "opto/chaitin.hpp"
  49 #include "opto/compile.hpp"
  50 #include "opto/connode.hpp"
  51 #include "opto/convertnode.hpp"
  52 #include "opto/divnode.hpp"
  53 #include "opto/escape.hpp"
  54 #include "opto/idealGraphPrinter.hpp"

  55 #include "opto/loopnode.hpp"
  56 #include "opto/machnode.hpp"
  57 #include "opto/macro.hpp"
  58 #include "opto/matcher.hpp"
  59 #include "opto/mathexactnode.hpp"
  60 #include "opto/memnode.hpp"
  61 #include "opto/mulnode.hpp"
  62 #include "opto/narrowptrnode.hpp"
  63 #include "opto/node.hpp"
  64 #include "opto/opcodes.hpp"
  65 #include "opto/output.hpp"
  66 #include "opto/parse.hpp"
  67 #include "opto/phaseX.hpp"
  68 #include "opto/rootnode.hpp"
  69 #include "opto/runtime.hpp"
  70 #include "opto/stringopts.hpp"
  71 #include "opto/type.hpp"
  72 #include "opto/vector.hpp"
  73 #include "opto/vectornode.hpp"
  74 #include "runtime/globals_extension.hpp"

 372   // A constant node that has no out-edges and only one in-edge from
 373   // root is usually dead. However, the reshaping walk sometimes makes
 374   // it reachable again by adding use edges. So we will NOT count Con
 375   // nodes as dead, to be conservative about the dead node count at
 376   // any given time.
 377   if (!dead->is_Con()) {
 378     record_dead_node(dead->_idx);
 379   }
 380   if (dead->is_macro()) {
 381     remove_macro_node(dead);
 382   }
 383   if (dead->is_expensive()) {
 384     remove_expensive_node(dead);
 385   }
 386   if (dead->Opcode() == Op_Opaque4) {
 387     remove_skeleton_predicate_opaq(dead);
 388   }
 389   if (dead->for_post_loop_opts_igvn()) {
 390     remove_from_post_loop_opts_igvn(dead);
 391   }
 392   if (dead->is_Call()) {
 393     remove_useless_late_inlines(                &_late_inlines, dead);
 394     remove_useless_late_inlines(         &_string_late_inlines, dead);
 395     remove_useless_late_inlines(         &_boxing_late_inlines, dead);
 396     remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
 397   }
 398   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 399   bs->unregister_potential_barrier_node(dead);
 400 }
 401 
 402 // Disconnect all useless nodes by disconnecting those at the boundary.
 403 void Compile::remove_useless_nodes(Unique_Node_List &useful) {
 404   uint next = 0;
 405   while (next < useful.size()) {
 406     Node *n = useful.at(next++);
 407     if (n->is_SafePoint()) {
 408       // We're done with a parsing phase. Replaced nodes are not valid
 409       // beyond that point.
 410       n->as_SafePoint()->delete_replaced_nodes();
 411     }
 412     // Use raw traversal of out edges since this code removes out edges
 413     int max = n->outcnt();
 414     for (int j = 0; j < max; ++j) {
 415       Node* child = n->raw_out(j);
 416       if (!useful.member(child)) {
 417         assert(!child->is_top() || child != top(),
 418                "If top is cached in Compile object it is in useful list");
 419         // Only need to remove this out-edge to the useless node
 420         n->raw_del_out(j);
 421         --j;
 422         --max;
 423       }
 424     }
 425     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 426       record_for_igvn(n->unique_out());
 427     }
 428   }
 429 
 430   remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
 431   remove_useless_nodes(_predicate_opaqs,    useful); // remove useless predicate opaque nodes
 432   remove_useless_nodes(_skeleton_predicate_opaqs, useful);
 433   remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
 434   remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
 435   remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
 436 
 437   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 438   bs->eliminate_useless_gc_barriers(useful, this);
 439   // clean up the late inline lists
 440   remove_useless_late_inlines(                &_late_inlines, useful);
 441   remove_useless_late_inlines(         &_string_late_inlines, useful);
 442   remove_useless_late_inlines(         &_boxing_late_inlines, useful);
 443   remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
 444   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 445 }
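The loop above is a "disconnect at the boundary" pass: for every node known to be useful, any def-use edge leading to a node outside the useful set is dropped, and the edge array is compacted in place (hence the --j/--max adjustment). A minimal standalone sketch of the same idea, not HotSpot code:

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    struct Node {
      std::vector<Node*> outs;   // raw def->use edges
    };

    // Drop every out-edge from a useful node to a node that is not in the useful set.
    static void disconnect_useless(const std::vector<Node*>& useful,
                                   const std::unordered_set<Node*>& useful_set) {
      for (Node* n : useful) {
        for (std::size_t j = 0; j < n->outs.size(); ) {
          if (useful_set.count(n->outs[j]) == 0) {
            // swap-and-pop stands in for raw_del_out(); the index is not advanced,
            // mirroring the --j/--max adjustment in the original loop
            n->outs[j] = n->outs.back();
            n->outs.pop_back();
          } else {
            ++j;
          }
        }
      }
    }

    int main() {
      Node a, b, dead;
      a.outs = { &b, &dead };
      std::vector<Node*> useful = { &a, &b };
      std::unordered_set<Node*> useful_set(useful.begin(), useful.end());
      disconnect_useless(useful, useful_set);
      return a.outs.size() == 1 ? 0 : 1;   // the edge to 'dead' is gone
    }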
 446 
 447 // ============================================================================
 448 //------------------------------CompileWrapper---------------------------------
 449 class CompileWrapper : public StackObj {
 450   Compile *const _compile;
 451  public:
 452   CompileWrapper(Compile* compile);
 453 
 454   ~CompileWrapper();

 582                   _has_reserved_stack_access(target->has_reserved_stack_access()),
 583 #ifndef PRODUCT
 584                   _igv_idx(0),
 585                   _trace_opto_output(directive->TraceOptoOutputOption),
 586 #endif
 587                   _has_method_handle_invokes(false),
 588                   _clinit_barrier_on_entry(false),
 589                   _stress_seed(0),
 590                   _comp_arena(mtCompiler),
 591                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 592                   _env(ci_env),
 593                   _directive(directive),
 594                   _log(ci_env->log()),
 595                   _failure_reason(NULL),
 596                   _intrinsics        (comp_arena(), 0, 0, NULL),
 597                   _macro_nodes       (comp_arena(), 8, 0, NULL),
 598                   _predicate_opaqs   (comp_arena(), 8, 0, NULL),
 599                   _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
 600                   _expensive_nodes   (comp_arena(), 8, 0, NULL),
 601                   _for_post_loop_igvn(comp_arena(), 8, 0, NULL),

 602                   _coarsened_locks   (comp_arena(), 8, 0, NULL),
 603                   _congraph(NULL),
 604                   NOT_PRODUCT(_igv_printer(NULL) COMMA)
 605                   _dead_node_list(comp_arena()),
 606                   _dead_node_count(0),
 607                   _node_arena(mtCompiler),
 608                   _old_arena(mtCompiler),
 609                   _mach_constant_base_node(NULL),
 610                   _Compile_types(mtCompiler),
 611                   _initial_gvn(NULL),
 612                   _for_igvn(NULL),
 613                   _late_inlines(comp_arena(), 2, 0, NULL),
 614                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 615                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 616                   _vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
 617                   _late_inlines_pos(0),
 618                   _number_of_mh_late_inlines(0),
 619                   _native_invokers(comp_arena(), 1, 0, NULL),
 620                   _print_inlining_stream(NULL),
 621                   _print_inlining_list(NULL),

 686   // Node list that Iterative GVN will start with
 687   Unique_Node_List for_igvn(comp_arena());
 688   set_for_igvn(&for_igvn);
 689 
 690   // GVN that will be run immediately on new nodes
 691   uint estimated_size = method()->code_size()*4+64;
 692   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 693   PhaseGVN gvn(node_arena(), estimated_size);
 694   set_initial_gvn(&gvn);
 695 
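As a concrete example of the hash-table sizing above: a method with 100 bytes of bytecode gets an estimated_size of 100*4 + 64 = 464 entries, and anything smaller than MINIMUM_NODE_HASH is rounded up to that floor before the PhaseGVN table is built.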
 696   print_inlining_init();
 697   { // Scope for timing the parser
 698     TracePhase tp("parse", &timers[_t_parser]);
 699 
 700     // Put top into the hash table ASAP.
 701     initial_gvn()->transform_no_reclaim(top());
 702 
 703     // Set up tf(), start(), and find a CallGenerator.
 704     CallGenerator* cg = NULL;
 705     if (is_osr_compilation()) {
 706       const TypeTuple *domain = StartOSRNode::osr_domain();
 707       const TypeTuple *range = TypeTuple::make_range(method()->signature());
 708       init_tf(TypeFunc::make(domain, range));
 709       StartNode* s = new StartOSRNode(root(), domain);
 710       initial_gvn()->set_type_bottom(s);
 711       init_start(s);
 712       cg = CallGenerator::for_osr(method(), entry_bci());
 713     } else {
 714       // Normal case.
 715       init_tf(TypeFunc::make(method()));
 716       StartNode* s = new StartNode(root(), tf()->domain());
 717       initial_gvn()->set_type_bottom(s);
 718       init_start(s);
 719       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 720         // With java.lang.ref.reference.get() we must go through the
 721         // intrinsic - even when get() is the root
 722         // method of the compile - so that, if necessary, the value in
 723         // the referent field of the reference object gets recorded by
 724         // the pre-barrier code.
 725         cg = find_intrinsic(method(), false);
 726       }
 727       if (cg == NULL) {
 728         float past_uses = method()->interpreter_invocation_count();
 729         float expected_uses = past_uses;
 730         cg = CallGenerator::for_inline(method(), expected_uses);
 731       }
 732     }
 733     if (failing())  return;
 734     if (cg == NULL) {
 735       record_method_not_compilable("cannot parse method");
 736       return;

 815     print_ideal_ir("print_ideal");
 816   }
 817 #endif
 818 
 819 #ifdef ASSERT
 820   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 821   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 822 #endif
 823 
 824   // Dump compilation data to replay it.
 825   if (directive->DumpReplayOption) {
 826     env()->dump_replay_data(_compile_id);
 827   }
 828   if (directive->DumpInlineOption && (ilt() != NULL)) {
 829     env()->dump_inline_data(_compile_id);
 830   }
 831 
 832   // Now that we know the size of all the monitors we can add a fixed slot
 833   // for the original deopt pc.
 834   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
 835   set_fixed_slots(next_slot);
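As a concrete example of the slot arithmetic above: with 8-byte addresses and the usual 4-byte VM stack slots (VMRegImpl::stack_slot_size), sizeof(address) / VMRegImpl::stack_slot_size is 2, so two extra stack slots are reserved for the original deopt pc on a 64-bit VM.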
 836 
 837   // Compute when to use implicit null checks. Used by matching trap based
 838   // nodes and NullCheck optimization.
 839   set_allowed_deopt_reasons();
 840 
 841   // Now generate code
 842   Code_Gen();
 843 }
 844 
 845 //------------------------------Compile----------------------------------------
 846 // Compile a runtime stub
 847 Compile::Compile( ciEnv* ci_env,
 848                   TypeFunc_generator generator,
 849                   address stub_function,
 850                   const char *stub_name,
 851                   int is_fancy_jump,
 852                   bool pass_tls,
 853                   bool return_pc,
 854                   DirectiveSet* directive)

 966   // Create Debug Information Recorder to record scopes, oopmaps, etc.
 967   env()->set_oop_recorder(new OopRecorder(env()->arena()));
 968   env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
 969   env()->set_dependencies(new Dependencies(env()));
 970 
 971   _fixed_slots = 0;
 972   set_has_split_ifs(false);
 973   set_has_loops(false); // first approximation
 974   set_has_stringbuilder(false);
 975   set_has_boxed_value(false);
 976   _trap_can_recompile = false;  // no traps emitted yet
 977   _major_progress = true; // start out assuming good things will happen
 978   set_has_unsafe_access(false);
 979   set_max_vector_size(0);
 980   set_clear_upper_avx(false);  // false by default for clearing upper bits of ymm registers
 981   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
 982   set_decompile_count(0);
 983 
 984   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
 985   _loop_opts_cnt = LoopOptsCount;
 986   set_do_inlining(Inline);
 987   set_max_inline_size(MaxInlineSize);
 988   set_freq_inline_size(FreqInlineSize);
 989   set_do_scheduling(OptoScheduling);
 990 
 991   set_do_vector_loop(false);
 992 
 993   if (AllowVectorizeOnDemand) {
 994     if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
 995       set_do_vector_loop(true);
 996       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
 997     } else if (has_method() && method()->name() != 0 &&
 998                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
 999       set_do_vector_loop(true);
1000     }
1001   }
1002   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1003   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1004 
1005   set_age_code(has_method() && method()->profile_aging());

1269 bool Compile::allow_range_check_smearing() const {
1270   // If this method has already thrown a range-check,
1271   // assume it was because we already tried range smearing
1272   // and it failed.
1273   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1274   return !already_trapped;
1275 }
1276 
1277 
1278 //------------------------------flatten_alias_type-----------------------------
1279 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1280   int offset = tj->offset();
1281   TypePtr::PTR ptr = tj->ptr();
1282 
1283   // Known instance (scalarizable allocation) alias only with itself.
1284   bool is_known_inst = tj->isa_oopptr() != NULL &&
1285                        tj->is_oopptr()->is_known_instance();
1286 
1287   // Process weird unsafe references.
1288   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1289     assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");

1290     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1291     tj = TypeOopPtr::BOTTOM;
1292     ptr = tj->ptr();
1293     offset = tj->offset();
1294   }
1295 
1296   // Array pointers need some flattening
1297   const TypeAryPtr *ta = tj->isa_aryptr();
1298   if (ta && ta->is_stable()) {
1299     // Erase stability property for alias analysis.
1300     tj = ta = ta->cast_to_stable(false);
1301   }
1302   if( ta && is_known_inst ) {
1303     if ( offset != Type::OffsetBot &&
1304          offset > arrayOopDesc::length_offset_in_bytes() ) {
1305       offset = Type::OffsetBot; // Flatten constant access into array body only
1306       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
1307     }
1308   } else if( ta && _AliasLevel >= 2 ) {
1309     // For arrays indexed by constant indices, we flatten the alias
1310     // space to include all of the array body.  Only the header, klass
1311     // and array length can be accessed un-aliased.


1312     if( offset != Type::OffsetBot ) {
1313       if( ta->const_oop() ) { // MethodData* or Method*
1314         offset = Type::OffsetBot;   // Flatten constant access into array body
1315         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
1316       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1317         // range is OK as-is.
1318         tj = ta = TypeAryPtr::RANGE;
1319       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1320         tj = TypeInstPtr::KLASS; // all klass loads look alike
1321         ta = TypeAryPtr::RANGE; // generic ignored junk
1322         ptr = TypePtr::BotPTR;
1323       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1324         tj = TypeInstPtr::MARK;
1325         ta = TypeAryPtr::RANGE; // generic ignored junk
1326         ptr = TypePtr::BotPTR;
1327       } else {                  // Random constant offset into array body
1328         offset = Type::OffsetBot;   // Flatten constant access into array body
1329         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1330       }
1331     }
1332     // Arrays of fixed size alias with arrays of unknown size.
1333     if (ta->size() != TypeInt::POS) {
1334       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1335       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
1336     }
1337     // Arrays of known objects become arrays of unknown objects.
1338     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1339       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1340       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1341     }
1342     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1343       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1344       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1345     }
1346     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1347     // cannot be distinguished by bytecode alone.
1348     if (ta->elem() == TypeInt::BOOL) {
1349       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1350       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1351       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1352     }
1353     // During the 2nd round of IterGVN, NotNull castings are removed.
1354     // Make sure the Bottom and NotNull variants alias the same.
1355     // Also, make sure exact and non-exact variants alias the same.
1356     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1357       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
1358     }
1359   }
1360 
1361   // Oop pointers need some flattening
1362   const TypeInstPtr *to = tj->isa_instptr();
1363   if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1364     ciInstanceKlass *k = to->klass()->as_instance_klass();
1365     if( ptr == TypePtr::Constant ) {
1366       if (to->klass() != ciEnv::current()->Class_klass() ||
1367           offset < k->layout_helper_size_in_bytes()) {
1368         // No constant oop pointers (such as Strings); they alias with
1369         // unknown strings.
1370         assert(!is_known_inst, "not scalarizable allocation");
1371         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1372       }
1373     } else if( is_known_inst ) {
1374       tj = to; // Keep NotNull and klass_is_exact for instance type
1375     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1376       // During the 2nd round of IterGVN, NotNull castings are removed.
1377       // Make sure the Bottom and NotNull variants alias the same.
1378       // Also, make sure exact and non-exact variants alias the same.
1379       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1380     }
1381     if (to->speculative() != NULL) {
1382       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
1383     }
1384     // Canonicalize the holder of this field
1385     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1386       // First handle header references such as a LoadKlassNode, even if the
1387       // object's klass is unloaded at compile time (4965979).
1388       if (!is_known_inst) { // Do it only for non-instance types
1389         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1390       }
1391     } else if (offset < 0 || offset >= k->layout_helper_size_in_bytes()) {
1392       // Static fields are in the space above the normal instance
1393       // fields in the java.lang.Class instance.
1394       if (to->klass() != ciEnv::current()->Class_klass()) {
1395         to = NULL;
1396         tj = TypeOopPtr::BOTTOM;
1397         offset = tj->offset();
1398       }
1399     } else {
1400       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1401       assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1402       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1403         if( is_known_inst ) {
1404           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1405         } else {
1406           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1407         }
1408       }
1409     }
1410   }
1411 
1412   // Klass pointers to object array klasses need some flattening
1413   const TypeKlassPtr *tk = tj->isa_klassptr();
1414   if( tk ) {
1415     // If we are referencing a field within a Klass, we need
1416     // to assume the worst case of an Object.  Both exact and
1417     // inexact types must flatten to the same alias class so
1418     // use NotNull as the PTR.
1419     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1420 
1421       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1422                                    TypeInstKlassPtr::OBJECT->klass(),
1423                                    offset);
1424     }
1425 
1426     ciKlass* klass = tk->klass();
1427     if( klass->is_obj_array_klass() ) {
1428       ciKlass* k = TypeAryPtr::OOPS->klass();
1429       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1430         k = TypeInstPtr::BOTTOM->klass();
1431       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
1432     }
1433 
1434     // Check for precise loads from the primary supertype array and force them
1435     // to the supertype cache alias index.  Check for generic array loads from
1436     // the primary supertype array and also force them to the supertype cache
1437     // alias index.  Since the same load can reach both, we need to merge
1438     // these 2 disparate memories into the same alias class.  Since the
1439     // primary supertype array is read-only, there's no chance of confusion
1440     // where we bypass an array load and an array store.
1441     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1442     if (offset == Type::OffsetBot ||
1443         (offset >= primary_supers_offset &&
1444          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1445         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1446       offset = in_bytes(Klass::secondary_super_cache_offset());
1447       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
1448     }
1449   }
1450 
1451   // Flatten all Raw pointers together.
1452   if (tj->base() == Type::RawPtr)
1453     tj = TypeRawPtr::BOTTOM;
1454 
1455   if (tj->base() == Type::AnyPtr)
1456     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1457 
1458   // Flatten all to bottom for now
1459   switch( _AliasLevel ) {
1460   case 0:
1461     tj = TypePtr::BOTTOM;
1462     break;
1463   case 1:                       // Flatten to: oop, static, field or array
1464     switch (tj->base()) {
1465     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1466     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1467     case Type::AryPtr:   // do not distinguish arrays at all

1568   intptr_t key = (intptr_t) adr_type;
1569   key ^= key >> logAliasCacheSize;
1570   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1571 }
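The cache lookup above is a simple direct-mapped hash: fold the high pointer bits into the low ones, then mask down to the table size. A minimal standalone sketch, not HotSpot code; the table size of 1 << 6 below is an illustrative assumption, not the actual AliasCacheSize:

    #include <cassert>
    #include <cstdint>

    static const int       kLogCacheSize = 6;   // assumed; stands in for logAliasCacheSize
    static const uintptr_t kMask = (uintptr_t(1) << kLogCacheSize) - 1;  // right_n_bits(kLogCacheSize)

    // Map an address-type pointer to a slot in a direct-mapped cache.
    static int alias_cache_slot(const void* adr_type) {
      uintptr_t key = reinterpret_cast<uintptr_t>(adr_type);
      key ^= key >> kLogCacheSize;   // mix the high bits into the index bits
      return static_cast<int>(key & kMask);
    }

    int main() {
      int dummy;
      int slot = alias_cache_slot(&dummy);
      assert(slot >= 0 && slot < (1 << kLogCacheSize));
      return 0;
    }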
1572 
1573 
1574 //-----------------------------grow_alias_types--------------------------------
1575 void Compile::grow_alias_types() {
1576   const int old_ats  = _max_alias_types; // how many before?
1577   const int new_ats  = old_ats;          // how many more?
1578   const int grow_ats = old_ats+new_ats;  // how many now?
1579   _max_alias_types = grow_ats;
1580   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1581   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1582   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1583   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1584 }
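Since new_ats equals old_ats, each call to grow_alias_types() doubles the capacity: for example, a table of 16 alias types grows to 32, then 64, and so on. Only the pointer array is reallocated; the freshly allocated AliasType entries are zeroed and appended after the existing ones.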
1585 
1586 
1587 //--------------------------------find_alias_type------------------------------
1588 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1589   if (_AliasLevel == 0)
1590     return alias_type(AliasIdxBot);
1591 
1592   AliasCacheEntry* ace = probe_alias_cache(adr_type);
1593   if (ace->_adr_type == adr_type) {
1594     return alias_type(ace->_index);
1595   }
1596 
1597   // Handle special cases.
1598   if (adr_type == NULL)             return alias_type(AliasIdxTop);
1599   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1600 
1601   // Do it the slow way.
1602   const TypePtr* flat = flatten_alias_type(adr_type);
1603 
1604 #ifdef ASSERT
1605   {
1606     ResourceMark rm;
1607     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1608            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1609     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1610            Type::str(adr_type));
1611     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1612       const TypeOopPtr* foop = flat->is_oopptr();
1613       // Scalarizable allocations have exact klass always.
1614       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1624     if (alias_type(i)->adr_type() == flat) {
1625       idx = i;
1626       break;
1627     }
1628   }
1629 
1630   if (idx == AliasIdxTop) {
1631     if (no_create)  return NULL;
1632     // Grow the array if necessary.
1633     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1634     // Add a new alias type.
1635     idx = _num_alias_types++;
1636     _alias_types[idx]->Init(idx, flat);
1637     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1638     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1639     if (flat->isa_instptr()) {
1640       if (flat->offset() == java_lang_Class::klass_offset()
1641           && flat->is_instptr()->klass() == env()->Class_klass())
1642         alias_type(idx)->set_rewritable(false);
1643     }

1644     if (flat->isa_aryptr()) {
1645 #ifdef ASSERT
1646       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1647       // (T_BYTE has the weakest alignment and size restrictions...)
1648       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1649 #endif

1650       if (flat->offset() == TypePtr::OffsetBot) {
1651         alias_type(idx)->set_element(flat->is_aryptr()->elem());
1652       }
1653     }
1654     if (flat->isa_klassptr()) {
1655       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1656         alias_type(idx)->set_rewritable(false);
1657       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1658         alias_type(idx)->set_rewritable(false);
1659       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1660         alias_type(idx)->set_rewritable(false);
1661       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1662         alias_type(idx)->set_rewritable(false);


1663       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1664         alias_type(idx)->set_rewritable(false);
1665     }
1666     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1667     // but the base pointer type is not distinctive enough to identify
1668     // references into JavaThread.)
1669 
1670     // Check for final fields.
1671     const TypeInstPtr* tinst = flat->isa_instptr();
1672     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1673       ciField* field;
1674       if (tinst->const_oop() != NULL &&
1675           tinst->klass() == ciEnv::current()->Class_klass() &&
1676           tinst->offset() >= (tinst->klass()->as_instance_klass()->layout_helper_size_in_bytes())) {
1677         // static field
1678         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1679         field = k->get_field_by_offset(tinst->offset(), true);
1680       } else {
1681         ciInstanceKlass *k = tinst->klass()->as_instance_klass();
1682         field = k->get_field_by_offset(tinst->offset(), false);
1683       }
1684       assert(field == NULL ||
1685              original_field == NULL ||
1686              (field->holder() == original_field->holder() &&
1687               field->offset() == original_field->offset() &&
1688               field->is_static() == original_field->is_static()), "wrong field?");
1689       // Set field() and is_rewritable() attributes.
1690       if (field != NULL)  alias_type(idx)->set_field(field);
1691     }
1692   }
1693 
1694   // Fill the cache for next time.
1695   ace->_adr_type = adr_type;
1696   ace->_index    = idx;
1697   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");

1698 
1699   // Might as well try to fill the cache for the flattened version, too.
1700   AliasCacheEntry* face = probe_alias_cache(flat);
1701   if (face->_adr_type == NULL) {
1702     face->_adr_type = flat;
1703     face->_index    = idx;
1704     assert(alias_type(flat) == alias_type(idx), "flat type must work too");

1705   }
1706 
1707   return alias_type(idx);
1708 }
1709 
1710 
1711 Compile::AliasType* Compile::alias_type(ciField* field) {
1712   const TypeOopPtr* t;
1713   if (field->is_static())
1714     t = TypeInstPtr::make(field->holder()->java_mirror());
1715   else
1716     t = TypeOopPtr::make_from_klass_raw(field->holder());
1717   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1718   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1719   return atp;
1720 }
1721 
1722 
1723 //------------------------------have_alias_type--------------------------------
1724 bool Compile::have_alias_type(const TypePtr* adr_type) {

1801   C->set_post_loop_opts_phase(); // no more loop opts allowed
1802 
1803   assert(!C->major_progress(), "not cleared");
1804 
1805   if (_for_post_loop_igvn.length() > 0) {
1806     while (_for_post_loop_igvn.length() > 0) {
1807       Node* n = _for_post_loop_igvn.pop();
1808       n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1809       igvn._worklist.push(n);
1810     }
1811     igvn.optimize();
1812     assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1813 
1814     // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1815     if (C->major_progress()) {
1816       C->clear_major_progress(); // ensure that major progress is now clear
1817     }
1818   }
1819 }
1820 
1821 // StringOpts and late inlining of string methods
1822 void Compile::inline_string_calls(bool parse_time) {
1823   {
1824     // remove useless nodes to make the usage analysis simpler
1825     ResourceMark rm;
1826     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1827   }
1828 
1829   {
1830     ResourceMark rm;
1831     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1832     PhaseStringOpts pso(initial_gvn(), for_igvn());
1833     print_method(PHASE_AFTER_STRINGOPTS, 3);
1834   }
1835 
1836   // now inline anything that we skipped the first time around
1837   if (!parse_time) {
1838     _late_inlines_pos = _late_inlines.length();
1839   }
1840 

1990     assert(has_stringbuilder(), "inconsistent");
1991     for_igvn()->clear();
1992     initial_gvn()->replace_with(&igvn);
1993 
1994     inline_string_calls(false);
1995 
1996     if (failing())  return;
1997 
1998     inline_incrementally_cleanup(igvn);
1999   }
2000 
2001   set_inlining_incrementally(false);
2002 }
2003 
2004 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2005   // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2006   // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2007   // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
2008   // as if "inlining_incrementally() == true" were set.
2009   assert(inlining_incrementally() == false, "not allowed");
2010   assert(_modified_nodes == NULL, "not allowed");
2011   assert(_late_inlines.length() > 0, "sanity");
2012 
2013   while (_late_inlines.length() > 0) {
2014     for_igvn()->clear();
2015     initial_gvn()->replace_with(&igvn);
2016 
2017     while (inline_incrementally_one()) {
2018       assert(!failing(), "inconsistent");
2019     }
2020     if (failing())  return;
2021 
2022     inline_incrementally_cleanup(igvn);
2023   }

2024 }
2025 
2026 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2027   if (_loop_opts_cnt > 0) {
2028     debug_only( int cnt = 0; );
2029     while (major_progress() && (_loop_opts_cnt > 0)) {
2030       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2031       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2032       PhaseIdealLoop::optimize(igvn, mode);
2033       _loop_opts_cnt--;
2034       if (failing())  return false;
2035       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2036     }
2037   }
2038   return true;
2039 }
2040 
2041 // Remove edges from "root" to each SafePoint at a backward branch.
2042 // They were inserted during parsing (see add_safepoint()) to make
2043 // infinite loops without calls or exceptions visible to root, i.e.,

2147     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2148     initial_gvn()->replace_with(&igvn);
2149     Unique_Node_List* old_worklist = for_igvn();
2150     old_worklist->clear();
2151     Unique_Node_List new_worklist(C->comp_arena());
2152     {
2153       ResourceMark rm;
2154       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2155     }
2156     Unique_Node_List* save_for_igvn = for_igvn();
2157     set_for_igvn(&new_worklist);
2158     igvn = PhaseIterGVN(initial_gvn());
2159     igvn.optimize();
2160     set_for_igvn(old_worklist); // new_worklist is dead beyond this point
2161   }
2162 
2163   // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2164   // safepoints
2165   remove_root_to_sfpts_edges(igvn);
2166 
2167   // Perform escape analysis
2168   if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2169     if (has_loops()) {
2170       // Cleanup graph (remove dead nodes).
2171       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2172       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2173       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2174       if (failing())  return;
2175     }
2176     bool progress;
2177     do {
2178       ConnectionGraph::do_analysis(this, &igvn);
2179 
2180       if (failing())  return;
2181 
2182       int mcount = macro_count(); // Record number of allocations and locks before IGVN
2183 
2184       // Optimize out fields loads from scalar replaceable allocations.
2185       igvn.optimize();
2186       print_method(PHASE_ITER_GVN_AFTER_EA, 2);

2260   print_method(PHASE_ITER_GVN2, 2);
2261 
2262   if (failing())  return;
2263 
2264   // Loop transforms on the ideal graph.  Range Check Elimination,
2265   // peeling, unrolling, etc.
2266   if (!optimize_loops(igvn, LoopOptsDefault)) {
2267     return;
2268   }
2269 
2270   if (failing())  return;
2271 
2272   C->clear_major_progress(); // ensure that major progress is now clear
2273 
2274   process_for_post_loop_opts_igvn(igvn);
2275 
2276 #ifdef ASSERT
2277   bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2278 #endif
2279 
2280   {
2281     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2282     PhaseMacroExpand  mex(igvn);
2283     if (mex.expand_macro_nodes()) {
2284       assert(failing(), "must bail out w/ explicit message");
2285       return;
2286     }
2287     print_method(PHASE_MACRO_EXPANSION, 2);
2288   }
2289 
2290   {
2291     TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2292     if (bs->expand_barriers(this, igvn)) {
2293       assert(failing(), "must bail out w/ explicit message");
2294       return;
2295     }
2296     print_method(PHASE_BARRIER_EXPANSION, 2);
2297   }
2298 
2299   if (C->max_vector_size() > 0) {
2300     C->optimize_logic_cones(igvn);
2301     igvn.optimize();
2302   }
2303 
2304   DEBUG_ONLY( _modified_nodes = NULL; )

2305 
2306   assert(igvn._worklist.size() == 0, "not empty");
2307 
2308   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2309 
2310   if (_late_inlines.length() > 0) {
2311     // More opportunities to optimize virtual and MH calls.
2312     // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
2313     process_late_inline_calls_no_inline(igvn);
2314   }
2315  } // (End scope of igvn; run destructor if necessary for asserts.)
2316 
2317  check_no_dead_use();
2318 
2319  process_print_inlining();
2320 
2321  // A method with only infinite loops has no edges entering loops from root
2322  {
2323    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2324    if (final_graph_reshaping()) {
2325      assert(failing(), "must bail out w/ explicit message");
2326      return;
2327    }
2328  }
2329 
2330  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2331  DEBUG_ONLY(set_phase_optimize_finished();)
2332 }
2333 
2334 #ifdef ASSERT

2917             // Accumulate any precedence edges
2918             if (mem->in(i) != NULL) {
2919               n->add_prec(mem->in(i));
2920             }
2921           }
2922           // Everything above this point has been processed.
2923           done = true;
2924         }
2925         // Eliminate the previous StoreCM
2926         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2927         assert(mem->outcnt() == 0, "should be dead");
2928         mem->disconnect_inputs(this);
2929       } else {
2930         prev = mem;
2931       }
2932       mem = prev->in(MemNode::Memory);
2933     }
2934   }
2935 }
2936 

2937 //------------------------------final_graph_reshaping_impl----------------------
2938 // Implement items 1-5 from final_graph_reshaping below.
2939 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2940 
2941   if ( n->outcnt() == 0 ) return; // dead node
2942   uint nop = n->Opcode();
2943 
2944   // Check for 2-input instruction with "last use" on right input.
2945   // Swap to left input.  Implements item (2).
2946   if( n->req() == 3 &&          // two-input instruction
2947       n->in(1)->outcnt() > 1 && // left use is NOT a last use
2948       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2949       n->in(2)->outcnt() == 1 &&// right use IS a last use
2950       !n->in(2)->is_Con() ) {   // right use is not a constant
2951     // Check for commutative opcode
2952     switch( nop ) {
2953     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
2954     case Op_MaxI:  case Op_MaxL:  case Op_MaxF:  case Op_MaxD:
2955     case Op_MinI:  case Op_MinL:  case Op_MinF:  case Op_MinD:
2956     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:

3071       if (n->outcnt() > 1 &&
3072           !n->is_Proj() &&
3073           nop != Op_CreateEx &&
3074           nop != Op_CheckCastPP &&
3075           nop != Op_DecodeN &&
3076           nop != Op_DecodeNKlass &&
3077           !n->is_Mem() &&
3078           !n->is_Phi()) {
3079         Node *x = n->clone();
3080         call->set_req(TypeFunc::Parms, x);
3081       }
3082     }
3083     break;
3084   }
3085 
3086   case Op_StoreCM:
3087     {
3088       // Convert OopStore dependence into precedence edge
3089       Node* prec = n->in(MemNode::OopStore);
3090       n->del_req(MemNode::OopStore);
3091       n->add_prec(prec);
3092       eliminate_redundant_card_marks(n);
3093     }
3094 
3095     // fall through
3096 
3097   case Op_StoreB:
3098   case Op_StoreC:
3099   case Op_StorePConditional:
3100   case Op_StoreI:
3101   case Op_StoreL:
3102   case Op_StoreIConditional:
3103   case Op_StoreLConditional:
3104   case Op_CompareAndSwapB:
3105   case Op_CompareAndSwapS:
3106   case Op_CompareAndSwapI:
3107   case Op_CompareAndSwapL:
3108   case Op_CompareAndSwapP:
3109   case Op_CompareAndSwapN:
3110   case Op_WeakCompareAndSwapB:
3111   case Op_WeakCompareAndSwapS:

3645           // Replace all nodes with identical edges as m with m
3646           k->subsume_by(m, this);
3647         }
3648       }
3649     }
3650     break;
3651   }
3652   case Op_CmpUL: {
3653     if (!Matcher::has_match_rule(Op_CmpUL)) {
3654       // No support for unsigned long comparisons
3655       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3656       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3657       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3658       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3659       Node* andl = new AndLNode(orl, remove_sign_mask);
3660       Node* cmp = new CmpLNode(andl, n->in(2));
3661       n->subsume_by(cmp, this);
3662     }
3663     break;
3664   }
3665   default:
3666     assert(!n->is_Call(), "");
3667     assert(!n->is_Mem(), "");
3668     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3669     break;
3670   }
3671 }
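Regarding the Op_CmpUL lowering in the case above: OR-ing in the arithmetic sign-shift mask and AND-ing with max_jlong pins any value whose sign bit is set at max_jlong, after which a plain signed compare gives the same "less than" answer as the unsigned compare, provided the right-hand input is non-negative (the situation this lowering appears to target). A standalone sketch of the arithmetic, not HotSpot code:

    #include <cassert>
    #include <cstdint>

    // Signed-compare emulation of "x <u y" for non-negative y, mirroring the node sequence:
    // RShiftL(x, 63) -> OrL -> AndL(max_jlong) -> CmpL
    static bool cmpul_less(int64_t x, int64_t y) {
      int64_t sign_bit_mask = x >> 63;        // arithmetic shift, as RShiftL does: 0 or -1
      int64_t orl  = x | sign_bit_mask;       // x, or all ones if x has the sign bit set
      int64_t andl = orl & INT64_MAX;         // x, or max_jlong if x has the sign bit set
      return andl < y;                        // plain signed compare
    }

    int main() {
      const int64_t samples[] = { 0, 1, 42, INT64_MAX, -1, INT64_MIN, -1234567 };
      for (int64_t x : samples) {
        for (int64_t y : samples) {
          if (y < 0) continue;                // the lowering assumes a non-negative right input
          assert(cmpul_less(x, y) == ((uint64_t)x < (uint64_t)y));
        }
      }
      return 0;
    }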
3672 
3673 //------------------------------final_graph_reshaping_walk---------------------
3674 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3675 // requires that the walk visits a node's inputs before visiting the node.
3676 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3677   Unique_Node_List sfpt;
3678 
3679   frc._visited.set(root->_idx); // first, mark node as visited
3680   uint cnt = root->req();
3681   Node *n = root;
3682   uint  i = 0;
3683   while (true) {
3684     if (i < cnt) {

4003   }
4004 }
4005 
4006 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4007   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4008 }
4009 
4010 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4011   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4012 }
4013 
4014 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4015   if (holder->is_initialized()) {
4016     return false;
4017   }
4018   if (holder->is_being_initialized()) {
4019     if (accessing_method->holder() == holder) {
4020       // Access inside a class. The barrier can be elided when the access happens in <clinit>,
4021       // <init>, or a static method. In all those cases, an initialization barrier for the
4022       // holder klass has already been passed.
4023       if (accessing_method->is_static_initializer() ||
4024           accessing_method->is_object_initializer() ||
4025           accessing_method->is_static()) {
4026         return false;
4027       }
4028     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4029       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4030       // In case of <init> or a static method, a barrier on the subclass alone is not enough:
4031       // the child class can become fully initialized while its parent class is still being initialized.
4032       if (accessing_method->is_static_initializer()) {
4033         return false;
4034       }
4035     }
4036     ciMethod* root = method(); // the root method of compilation
4037     if (root != accessing_method) {
4038       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4039     }
4040   }
4041   return true;
4042 }
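A simplified standalone sketch (not HotSpot code) of the elision rules above; it mirrors the checks on the holder and the accessing method but omits the final re-check against the compilation root:

    #include <cassert>

    enum class Kind { StaticInitializer, ObjectInitializer, StaticMethod, InstanceMethod };

    struct Klass {
      bool initialized;
      bool being_initialized;
      const Klass* super;   // nullptr at the root of the hierarchy
    };

    static bool is_subclass_of(const Klass* k, const Klass* other) {
      for (; k != nullptr; k = k->super) if (k == other) return true;
      return false;
    }

    // Mirrors Compile::needs_clinit_barrier(holder, accessing_method),
    // minus the recursion into the compilation root.
    static bool needs_clinit_barrier(const Klass* holder, const Klass* accessor_holder, Kind kind) {
      if (holder->initialized) return false;
      if (holder->being_initialized) {
        if (accessor_holder == holder) {
          if (kind == Kind::StaticInitializer || kind == Kind::ObjectInitializer || kind == Kind::StaticMethod)
            return false;                                  // barrier already passed for the holder
        } else if (is_subclass_of(accessor_holder, holder)) {
          if (kind == Kind::StaticInitializer) return false; // subclass <clinit> implies holder init started
        }
      }
      return true;
    }

    int main() {
      Klass parent{false, true, nullptr};
      Klass child {false, false, &parent};
      assert(!needs_clinit_barrier(&parent, &parent, Kind::StaticMethod));      // access from the class itself
      assert(!needs_clinit_barrier(&parent, &child,  Kind::StaticInitializer)); // subclass <clinit>
      assert( needs_clinit_barrier(&parent, &child,  Kind::ObjectInitializer)); // subclass <init> still needs one
      return 0;
    }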
4043 
4044 #ifndef PRODUCT
4045 //------------------------------verify_graph_edges---------------------------
4046 // Walk the Graph and verify that there is a one-to-one correspondence
4047 // between Use-Def edges and Def-Use edges in the graph.
4048 void Compile::verify_graph_edges(bool no_dead_code) {
4049   if (VerifyGraphEdges) {
4050     Unique_Node_List visited;
4051     // Call recursive graph walk to check edges
4052     _root->verify_edges(visited);

4133                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
4134   }
4135 
4136   if (VerifyIdealNodeCount) {
4137     Compile::current()->print_missing_nodes();
4138   }
4139 #endif
4140 
4141   if (_log != NULL) {
4142     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4143   }
4144 }
4145 
4146 //----------------------------static_subtype_check-----------------------------
4147 // Shortcut important common cases when superklass is exact:
4148 // (0) superklass is java.lang.Object (can occur in reflective code)
4149 // (1) subklass is already limited to a subtype of superklass => always ok
4150 // (2) subklass does not overlap with superklass => always fail
4151 // (3) superklass has NO subtypes and we can check with a simple compare.
4152 int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
4153   if (StressReflectiveCode) {
4154     return SSC_full_test;       // Let caller generate the general case.
4155   }
4156 
4157   if (superk == env()->Object_klass()) {
4158     return SSC_always_true;     // (0) this test cannot fail
4159   }
4160 
4161   ciType* superelem = superk;
4162   ciType* subelem = subk;
4163   if (superelem->is_array_klass()) {
4164     superelem = superelem->as_array_klass()->base_element_type();
4165   }
4166   if (subelem->is_array_klass()) {
4167     subelem = subelem->as_array_klass()->base_element_type();
4168   }
4169 
4170   if (!subk->is_interface()) {  // cannot trust static interface types yet
4171     if (subk->is_subtype_of(superk)) {
4172       return SSC_always_true;   // (1) false path dead; no dynamic test needed
4173     }
4174     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
4175         !(subelem->is_klass() && subelem->as_klass()->is_interface()) &&
4176         !superk->is_subtype_of(subk)) {
4177       return SSC_always_false;  // (2) true path dead; no dynamic test needed
4178     }
4179   }
4180 
4181   // If casting to an instance klass, it must have no subtypes
4182   if (superk->is_interface()) {
4183     // Cannot trust interfaces yet.
4184     // %%% S.B. superk->nof_implementors() == 1
4185   } else if (superelem->is_instance_klass()) {
4186     ciInstanceKlass* ik = superelem->as_instance_klass();
4187     if (!ik->has_subklass() && !ik->is_interface()) {
4188       if (!ik->is_final()) {
4189         // Add a dependency if there is a chance of a later subclass.
4190         dependencies()->assert_leaf_type(ik);
4191       }
4192       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
4193     }
4194   } else {
4195     // A primitive array type has no subtypes.
4196     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
4197   }
4198 
4199   return SSC_full_test;
4200 }
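As a worked example of the four outcomes above: with superk == java.lang.Object the answer is SSC_always_true (case 0); subk == String against superk == CharSequence is SSC_always_true via case 1; String against Number is SSC_always_false, since neither hierarchy reaches the other (case 2); and a leaf superklass such as String allows SSC_easy_test, a single pointer compare against the exact klass (case 3), with a leaf-type dependency recorded when that class is not final. Everything else falls back to SSC_full_test.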

4692       const Type* t = igvn.type_or_null(n);
4693       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4694       if (n->is_Type()) {
4695         t = n->as_Type()->type();
4696         assert(t == t->remove_speculative(), "no more speculative types");
4697       }
4698       // Iterate over outs - endless loops is unreachable from below
4699       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4700         Node *m = n->fast_out(i);
4701         if (not_a_node(m)) {
4702           continue;
4703         }
4704         worklist.push(m);
4705       }
4706     }
4707     igvn.check_no_speculative_types();
4708 #endif
4709   }
4710 }
4711 
4712 // Auxiliary methods to support randomized stressing/fuzzing.
4713 
4714 int Compile::random() {
4715   _stress_seed = os::next_random(_stress_seed);
4716   return static_cast<int>(_stress_seed);
4717 }
4718 
4719 // This method can be called an arbitrary number of times, with the current count
4720 // as the argument. The logic allows selecting a single candidate from the
4721 // running list of candidates as follows:
4722 //    int count = 0;
4723 //    Cand* selected = null;
4724 //    while(cand = cand->next()) {
4725 //      if (randomized_select(++count)) {
4726 //        selected = cand;
4727 //      }
4728 //    }
4729 //
4730 // Including count equalizes the chances any candidate is "selected".
4731 // This is useful when we don't have the complete list of candidates to choose

  35 #include "compiler/disassembler.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/c2/barrierSetC2.hpp"
  39 #include "jfr/jfrEvents.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "opto/addnode.hpp"
  42 #include "opto/block.hpp"
  43 #include "opto/c2compiler.hpp"
  44 #include "opto/callGenerator.hpp"
  45 #include "opto/callnode.hpp"
  46 #include "opto/castnode.hpp"
  47 #include "opto/cfgnode.hpp"
  48 #include "opto/chaitin.hpp"
  49 #include "opto/compile.hpp"
  50 #include "opto/connode.hpp"
  51 #include "opto/convertnode.hpp"
  52 #include "opto/divnode.hpp"
  53 #include "opto/escape.hpp"
  54 #include "opto/idealGraphPrinter.hpp"
  55 #include "opto/inlinetypenode.hpp"
  56 #include "opto/loopnode.hpp"
  57 #include "opto/machnode.hpp"
  58 #include "opto/macro.hpp"
  59 #include "opto/matcher.hpp"
  60 #include "opto/mathexactnode.hpp"
  61 #include "opto/memnode.hpp"
  62 #include "opto/mulnode.hpp"
  63 #include "opto/narrowptrnode.hpp"
  64 #include "opto/node.hpp"
  65 #include "opto/opcodes.hpp"
  66 #include "opto/output.hpp"
  67 #include "opto/parse.hpp"
  68 #include "opto/phaseX.hpp"
  69 #include "opto/rootnode.hpp"
  70 #include "opto/runtime.hpp"
  71 #include "opto/stringopts.hpp"
  72 #include "opto/type.hpp"
  73 #include "opto/vector.hpp"
  74 #include "opto/vectornode.hpp"
  75 #include "runtime/globals_extension.hpp"

 373   // A constant node that has no out-edges and only one in-edge from
 374   // root is usually dead. However, the reshaping walk sometimes makes
 375   // it reachable again by adding use edges. So we will NOT count Con
 376   // nodes as dead, to be conservative about the dead node count at
 377   // any given time.
 378   if (!dead->is_Con()) {
 379     record_dead_node(dead->_idx);
 380   }
 381   if (dead->is_macro()) {
 382     remove_macro_node(dead);
 383   }
 384   if (dead->is_expensive()) {
 385     remove_expensive_node(dead);
 386   }
 387   if (dead->Opcode() == Op_Opaque4) {
 388     remove_skeleton_predicate_opaq(dead);
 389   }
 390   if (dead->for_post_loop_opts_igvn()) {
 391     remove_from_post_loop_opts_igvn(dead);
 392   }
 393   if (dead->is_InlineTypeBase()) {
 394     remove_inline_type(dead);
 395   }
 396   if (dead->is_Call()) {
 397     remove_useless_late_inlines(                &_late_inlines, dead);
 398     remove_useless_late_inlines(         &_string_late_inlines, dead);
 399     remove_useless_late_inlines(         &_boxing_late_inlines, dead);
 400     remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
 401   }
 402   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 403   bs->unregister_potential_barrier_node(dead);
 404 }
 405 
 406 // Disconnect all useless nodes by disconnecting those at the boundary.
 407 void Compile::disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_List* worklist) {
 408   uint next = 0;
 409   while (next < useful.size()) {
 410     Node *n = useful.at(next++);
 411     if (n->is_SafePoint()) {
 412       // We're done with a parsing phase. Replaced nodes are not valid
 413       // beyond that point.
 414       n->as_SafePoint()->delete_replaced_nodes();
 415     }
 416     // Use raw traversal of out edges since this code removes out edges
 417     int max = n->outcnt();
 418     for (int j = 0; j < max; ++j) {
 419       Node* child = n->raw_out(j);
 420       if (!useful.member(child)) {
 421         assert(!child->is_top() || child != top(),
 422                "If top is cached in Compile object it is in useful list");
 423         // Only need to remove this out-edge to the useless node
 424         n->raw_del_out(j);
 425         --j;
 426         --max;
 427       }
 428     }
 429     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 430       worklist->push(n->unique_out());
 431     }
 432     if (n->outcnt() == 0) {
 433       worklist->push(n);
 434     }
 435   }
 436 
 437   remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
 438   remove_useless_nodes(_predicate_opaqs,    useful); // remove useless predicate opaque nodes
 439   remove_useless_nodes(_skeleton_predicate_opaqs, useful);
 440   remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
 441   remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
 442   remove_useless_nodes(_inline_type_nodes,  useful); // remove useless inline type nodes
 443 #ifdef ASSERT
 444   if (_modified_nodes != NULL) {
 445     _modified_nodes->remove_useless_nodes(useful.member_set());
 446   }
 447 #endif
 448   remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
 449 
 450   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 451   bs->eliminate_useless_gc_barriers(useful, this);
 452   // clean up the late inline lists
 453   remove_useless_late_inlines(                &_late_inlines, useful);
 454   remove_useless_late_inlines(         &_string_late_inlines, useful);
 455   remove_useless_late_inlines(         &_boxing_late_inlines, useful);
 456   remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
 457   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 458 }
 459 
 460 // ============================================================================
 461 //------------------------------CompileWrapper---------------------------------
 462 class CompileWrapper : public StackObj {
 463   Compile *const _compile;
 464  public:
 465   CompileWrapper(Compile* compile);
 466 
 467   ~CompileWrapper();

 595                   _has_reserved_stack_access(target->has_reserved_stack_access()),
 596 #ifndef PRODUCT
 597                   _igv_idx(0),
 598                   _trace_opto_output(directive->TraceOptoOutputOption),
 599 #endif
 600                   _has_method_handle_invokes(false),
 601                   _clinit_barrier_on_entry(false),
 602                   _stress_seed(0),
 603                   _comp_arena(mtCompiler),
 604                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 605                   _env(ci_env),
 606                   _directive(directive),
 607                   _log(ci_env->log()),
 608                   _failure_reason(NULL),
 609                   _intrinsics        (comp_arena(), 0, 0, NULL),
 610                   _macro_nodes       (comp_arena(), 8, 0, NULL),
 611                   _predicate_opaqs   (comp_arena(), 8, 0, NULL),
 612                   _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
 613                   _expensive_nodes   (comp_arena(), 8, 0, NULL),
 614                   _for_post_loop_igvn(comp_arena(), 8, 0, NULL),
 615                   _inline_type_nodes (comp_arena(), 8, 0, NULL),
 616                   _coarsened_locks   (comp_arena(), 8, 0, NULL),
 617                   _congraph(NULL),
 618                   NOT_PRODUCT(_igv_printer(NULL) COMMA)
 619                   _dead_node_list(comp_arena()),
 620                   _dead_node_count(0),
 621                   _node_arena(mtCompiler),
 622                   _old_arena(mtCompiler),
 623                   _mach_constant_base_node(NULL),
 624                   _Compile_types(mtCompiler),
 625                   _initial_gvn(NULL),
 626                   _for_igvn(NULL),
 627                   _late_inlines(comp_arena(), 2, 0, NULL),
 628                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 629                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 630                   _vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
 631                   _late_inlines_pos(0),
 632                   _number_of_mh_late_inlines(0),
 633                   _native_invokers(comp_arena(), 1, 0, NULL),
 634                   _print_inlining_stream(NULL),
 635                   _print_inlining_list(NULL),

 700   // Node list that Iterative GVN will start with
 701   Unique_Node_List for_igvn(comp_arena());
 702   set_for_igvn(&for_igvn);
 703 
 704   // GVN that will be run immediately on new nodes
 705   uint estimated_size = method()->code_size()*4+64;
 706   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
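       // Sizing note: roughly four nodes per bytecode plus some slack; e.g. a
       // 100-byte method starts the hash table at 100*4+64 = 464 entries unless
       // MINIMUM_NODE_HASH is larger (the numbers are illustrative only).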
 707   PhaseGVN gvn(node_arena(), estimated_size);
 708   set_initial_gvn(&gvn);
 709 
 710   print_inlining_init();
 711   { // Scope for timing the parser
 712     TracePhase tp("parse", &timers[_t_parser]);
 713 
 714     // Put top into the hash table ASAP.
 715     initial_gvn()->transform_no_reclaim(top());
 716 
 717     // Set up tf(), start(), and find a CallGenerator.
 718     CallGenerator* cg = NULL;
 719     if (is_osr_compilation()) {
 720       init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
 721       StartNode* s = new StartOSRNode(root(), tf()->domain_sig());


 722       initial_gvn()->set_type_bottom(s);
 723       init_start(s);
 724       cg = CallGenerator::for_osr(method(), entry_bci());
 725     } else {
 726       // Normal case.
 727       init_tf(TypeFunc::make(method()));
 728       StartNode* s = new StartNode(root(), tf()->domain_cc());
 729       initial_gvn()->set_type_bottom(s);
 730       init_start(s);
 731       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 732         // With java.lang.ref.Reference.get() we must go through the
 733         // intrinsic - even when get() is the root
 734         // method of the compile - so that, if necessary, the value in
 735         // the referent field of the reference object gets recorded by
 736         // the pre-barrier code.
 737         cg = find_intrinsic(method(), false);
 738       }
 739       if (cg == NULL) {
 740         float past_uses = method()->interpreter_invocation_count();
 741         float expected_uses = past_uses;
 742         cg = CallGenerator::for_inline(method(), expected_uses);
 743       }
 744     }
 745     if (failing())  return;
 746     if (cg == NULL) {
 747       record_method_not_compilable("cannot parse method");
 748       return;

 827     print_ideal_ir("print_ideal");
 828   }
 829 #endif
 830 
 831 #ifdef ASSERT
 832   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 833   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 834 #endif
 835 
 836   // Dump compilation data to replay it.
 837   if (directive->DumpReplayOption) {
 838     env()->dump_replay_data(_compile_id);
 839   }
 840   if (directive->DumpInlineOption && (ilt() != NULL)) {
 841     env()->dump_inline_data(_compile_id);
 842   }
 843 
 844   // Now that we know the size of all the monitors we can add a fixed slot
 845   // for the original deopt pc.
 846   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
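       // On typical 64-bit builds this reserves two 4-byte stack slots for the
       // 8-byte deopt pc (sizeof(address) / VMRegImpl::stack_slot_size == 2);
       // the exact values are platform-dependent.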
 847   if (needs_stack_repair()) {
 848     // One extra slot for the special stack increment value
 849     next_slot += 2;
 850   }
 851   // TODO 8284443 Only reserve extra slot if needed
 852   if (InlineTypeReturnedAsFields) {
 853     // One extra slot to hold the IsInit information for a nullable
 854     // inline type return if we run out of registers.
 855     next_slot += 2;
 856   }
 857   set_fixed_slots(next_slot);
 858 
 859   // Compute when to use implicit null checks. Used by matching trap based
 860   // nodes and NullCheck optimization.
 861   set_allowed_deopt_reasons();
 862 
 863   // Now generate code
 864   Code_Gen();
 865 }
 866 
 867 //------------------------------Compile----------------------------------------
 868 // Compile a runtime stub
 869 Compile::Compile( ciEnv* ci_env,
 870                   TypeFunc_generator generator,
 871                   address stub_function,
 872                   const char *stub_name,
 873                   int is_fancy_jump,
 874                   bool pass_tls,
 875                   bool return_pc,
 876                   DirectiveSet* directive)

 988   // Create Debug Information Recorder to record scopes, oopmaps, etc.
 989   env()->set_oop_recorder(new OopRecorder(env()->arena()));
 990   env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
 991   env()->set_dependencies(new Dependencies(env()));
 992 
 993   _fixed_slots = 0;
 994   set_has_split_ifs(false);
 995   set_has_loops(false); // first approximation
 996   set_has_stringbuilder(false);
 997   set_has_boxed_value(false);
 998   _trap_can_recompile = false;  // no traps emitted yet
 999   _major_progress = true; // start out assuming good things will happen
1000   set_has_unsafe_access(false);
1001   set_max_vector_size(0);
1002   set_clear_upper_avx(false);  // false by default for clearing the upper bits of ymm registers
1003   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1004   set_decompile_count(0);
1005 
1006   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1007   _loop_opts_cnt = LoopOptsCount;
1008   _has_flattened_accesses = false;
1009   _flattened_accesses_share_alias = true;
1010   _scalarize_in_safepoints = false;
1011 
1012   set_do_inlining(Inline);
1013   set_max_inline_size(MaxInlineSize);
1014   set_freq_inline_size(FreqInlineSize);
1015   set_do_scheduling(OptoScheduling);
1016 
1017   set_do_vector_loop(false);
1018 
1019   if (AllowVectorizeOnDemand) {
1020     if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
1021       set_do_vector_loop(true);
1022       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1023     } else if (has_method() && method()->name() != 0 &&
1024                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1025       set_do_vector_loop(true);
1026     }
1027   }
1028   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1029   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1030 
1031   set_age_code(has_method() && method()->profile_aging());

1295 bool Compile::allow_range_check_smearing() const {
1296   // If this method has already thrown a range-check trap,
1297   // assume it was because we already tried range smearing
1298   // and it failed.
1299   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1300   return !already_trapped;
1301 }
1302 
1303 
1304 //------------------------------flatten_alias_type-----------------------------
1305 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1306   int offset = tj->offset();
1307   TypePtr::PTR ptr = tj->ptr();
1308 
1309   // Known instance (scalarizable allocation) alias only with itself.
1310   bool is_known_inst = tj->isa_oopptr() != NULL &&
1311                        tj->is_oopptr()->is_known_instance();
1312 
1313   // Process weird unsafe references.
1314   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1315     bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass();
1316     assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops");
1317     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1318     tj = TypeOopPtr::BOTTOM;
1319     ptr = tj->ptr();
1320     offset = tj->offset();
1321   }
1322 
1323   // Array pointers need some flattening
1324   const TypeAryPtr *ta = tj->isa_aryptr();
1325   if (ta && ta->is_stable()) {
1326     // Erase stability property for alias analysis.
1327     tj = ta = ta->cast_to_stable(false);
1328   }
1329   if (ta && ta->is_not_flat()) {
1330     // Erase not flat property for alias analysis.
1331     tj = ta = ta->cast_to_not_flat(false);
1332   }
1333   if (ta && ta->is_not_null_free()) {
1334     // Erase not null free property for alias analysis.
1335     tj = ta = ta->cast_to_not_null_free(false);
1336   }
1337 
1338   if( ta && is_known_inst ) {
1339     if ( offset != Type::OffsetBot &&
1340          offset > arrayOopDesc::length_offset_in_bytes() ) {
1341       offset = Type::OffsetBot; // Flatten constant access into array body only
1342       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id());
1343     }
1344   } else if( ta && _AliasLevel >= 2 ) {
1345     // For arrays indexed by constant indices, we flatten the alias
1346     // space to include all of the array body.  Only the header, klass
1347     // and array length can be accessed un-aliased.
1348     // For flattened inline type arrays, each field has its own slice, so
1349     // we must include the field offset.
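         // For example, constant-index accesses such as a[2] and a[5] flatten
         // into the same array-body alias class below, while the array length
         // keeps its own RANGE slice and klass/mark header loads keep theirs.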
1350     if( offset != Type::OffsetBot ) {
1351       if( ta->const_oop() ) { // MethodData* or Method*
1352         offset = Type::OffsetBot;   // Flatten constant access into array body
1353         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1354       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1355         // range is OK as-is.
1356         tj = ta = TypeAryPtr::RANGE;
1357       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1358         tj = TypeInstPtr::KLASS; // all klass loads look alike
1359         ta = TypeAryPtr::RANGE; // generic ignored junk
1360         ptr = TypePtr::BotPTR;
1361       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1362         tj = TypeInstPtr::MARK;
1363         ta = TypeAryPtr::RANGE; // generic ignored junk
1364         ptr = TypePtr::BotPTR;
1365       } else {                  // Random constant offset into array body
1366         offset = Type::OffsetBot;   // Flatten constant access into array body
1367         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1368       }
1369     }
1370     // Arrays of fixed size alias with arrays of unknown size.
1371     if (ta->size() != TypeInt::POS) {
1372       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1373       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
1374     }
1375     // Arrays of known objects become arrays of unknown objects.
1376     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1377       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1378       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1379     }
1380     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1381       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1382       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1383     }
1384     // Initially all flattened array accesses share a single slice
1385     if (ta->is_flat() && ta->elem() != TypeInlineType::BOTTOM && _flattened_accesses_share_alias) {
1386       const TypeAry *tary = TypeAry::make(TypeInlineType::BOTTOM, ta->size());
1387       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
1388     }
1389     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1390     // cannot be distinguished by bytecode alone.
1391     if (ta->elem() == TypeInt::BOOL) {
1392       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1393       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1394       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
1395     }
1396     // During the 2nd round of IterGVN, NotNull castings are removed.
1397     // Make sure the Bottom and NotNull variants alias the same.
1398     // Also, make sure exact and non-exact variants alias the same.
1399     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1400       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1401     }
1402   }
1403 
1404   // Oop pointers need some flattening
1405   const TypeInstPtr *to = tj->isa_instptr();
1406   if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1407     ciInstanceKlass *k = to->klass()->as_instance_klass();
1408     if( ptr == TypePtr::Constant ) {
1409       if (to->klass() != ciEnv::current()->Class_klass() ||
1410           offset < k->layout_helper_size_in_bytes()) {
1411         // No constant oop pointers (such as Strings); they alias with
1412         // unknown strings.
1413         assert(!is_known_inst, "not scalarizable allocation");
1414         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
1415       }
1416     } else if( is_known_inst ) {
1417       tj = to; // Keep NotNull and klass_is_exact for instance type
1418     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1419       // During the 2nd round of IterGVN, NotNull castings are removed.
1420       // Make sure the Bottom and NotNull variants alias the same.
1421       // Also, make sure exact and non-exact variants alias the same.
1422       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
1423     }
1424     if (to->speculative() != NULL) {
1425       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->klass()->flatten_array(), to->instance_id());
1426     }
1427     // Canonicalize the holder of this field
1428     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1429       // First handle header references such as a LoadKlassNode, even if the
1430       // object's klass is unloaded at compile time (4965979).
1431       if (!is_known_inst) { // Do it only for non-instance types
1432         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
1433       }
1434     } else if (offset < 0 || offset >= k->layout_helper_size_in_bytes()) {
1435       // Static fields are in the space above the normal instance
1436       // fields in the java.lang.Class instance.
1437       if (to->klass() != ciEnv::current()->Class_klass()) {
1438         to = NULL;
1439         tj = TypeOopPtr::BOTTOM;
1440         offset = tj->offset();
1441       }
1442     } else {
1443       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1444       assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1445       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1446         if( is_known_inst ) {
1447           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), canonical_holder->flatten_array(), to->instance_id());
1448         } else {
1449           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
1450         }
1451       }
1452     }
1453   }
1454 
1455   // Klass pointers to object array klasses need some flattening
1456   const TypeKlassPtr *tk = tj->isa_klassptr();
1457   if( tk ) {
1458     // If we are referencing a field within a Klass, we need
1459     // to assume the worst case of an Object.  Both exact and
1460     // inexact types must flatten to the same alias class so
1461     // use NotNull as the PTR.
1462     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1463 
1464       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1465                                    TypeInstKlassPtr::OBJECT->klass(),
1466                                    Type::Offset(offset));
1467     }
1468 
1469     ciKlass* klass = tk->klass();
1470     if (klass != NULL && klass->is_obj_array_klass()) {
1471       ciKlass* k = TypeAryPtr::OOPS->klass();
1472       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1473         k = TypeInstPtr::BOTTOM->klass();
1474       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset));
1475     }
1476 
1477     // Check for precise loads from the primary supertype array and force them
1478     // to the supertype cache alias index.  Check for generic array loads from
1479     // the primary supertype array and also force them to the supertype cache
1480     // alias index.  Since the same load can reach both, we need to merge
1481     // these 2 disparate memories into the same alias class.  Since the
1482     // primary supertype array is read-only, there's no chance of confusion
1483     // where we bypass an array load and an array store.
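         // For example, a load at a constant offset into primary_supers[] and
         // the generic secondary_super_cache load both end up at the secondary
         // super cache alias index computed below.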
1484     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1485     if (offset == Type::OffsetBot ||
1486         (offset >= primary_supers_offset &&
1487          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1488         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1489       offset = in_bytes(Klass::secondary_super_cache_offset());
1490       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset));
1491     }
1492   }
1493 
1494   // Flatten all Raw pointers together.
1495   if (tj->base() == Type::RawPtr)
1496     tj = TypeRawPtr::BOTTOM;
1497 
1498   if (tj->base() == Type::AnyPtr)
1499     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1500 
1501   // Flatten all to bottom for now
1502   switch( _AliasLevel ) {
1503   case 0:
1504     tj = TypePtr::BOTTOM;
1505     break;
1506   case 1:                       // Flatten to: oop, static, field or array
1507     switch (tj->base()) {
1508     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1509     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1510     case Type::AryPtr:   // do not distinguish arrays at all

1611   intptr_t key = (intptr_t) adr_type;
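       // Fold the pointer's upper bits into the low-order bits that index the cache.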
1612   key ^= key >> logAliasCacheSize;
1613   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1614 }
1615 
1616 
1617 //-----------------------------grow_alias_types--------------------------------
1618 void Compile::grow_alias_types() {
1619   const int old_ats  = _max_alias_types; // how many before?
1620   const int new_ats  = old_ats;          // how many more?
1621   const int grow_ats = old_ats+new_ats;  // how many now?
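       // i.e. the alias type table doubles in capacity on each growth step.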
1622   _max_alias_types = grow_ats;
1623   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1624   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1625   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1626   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1627 }
1628 
1629 
1630 //--------------------------------find_alias_type------------------------------
1631 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1632   if (_AliasLevel == 0)
1633     return alias_type(AliasIdxBot);
1634 
1635   AliasCacheEntry* ace = NULL;
1636   if (!uncached) {
1637     ace = probe_alias_cache(adr_type);
1638     if (ace->_adr_type == adr_type) {
1639       return alias_type(ace->_index);
1640     }
1641   }
1642 
1643   // Handle special cases.
1644   if (adr_type == NULL)             return alias_type(AliasIdxTop);
1645   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1646 
1647   // Do it the slow way.
1648   const TypePtr* flat = flatten_alias_type(adr_type);
1649 
1650 #ifdef ASSERT
1651   {
1652     ResourceMark rm;
1653     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1654            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1655     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1656            Type::str(adr_type));
1657     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1658       const TypeOopPtr* foop = flat->is_oopptr();
1659       // Scalarizable allocations always have an exact klass.
1660       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1670     if (alias_type(i)->adr_type() == flat) {
1671       idx = i;
1672       break;
1673     }
1674   }
1675 
1676   if (idx == AliasIdxTop) {
1677     if (no_create)  return NULL;
1678     // Grow the array if necessary.
1679     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1680     // Add a new alias type.
1681     idx = _num_alias_types++;
1682     _alias_types[idx]->Init(idx, flat);
1683     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1684     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1685     if (flat->isa_instptr()) {
1686       if (flat->offset() == java_lang_Class::klass_offset()
1687           && flat->is_instptr()->klass() == env()->Class_klass())
1688         alias_type(idx)->set_rewritable(false);
1689     }
1690     ciField* field = NULL;
1691     if (flat->isa_aryptr()) {
1692 #ifdef ASSERT
1693       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1694       // (T_BYTE has the weakest alignment and size restrictions...)
1695       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1696 #endif
1697       const Type* elemtype = flat->is_aryptr()->elem();
1698       if (flat->offset() == TypePtr::OffsetBot) {
1699         alias_type(idx)->set_element(elemtype);
1700       }
1701       int field_offset = flat->is_aryptr()->field_offset().get();
1702       if (elemtype->isa_inlinetype() &&
1703           field_offset != Type::OffsetBot) {
1704         ciInlineKlass* vk = elemtype->inline_klass();
1705         field_offset += vk->first_field_offset();
1706         field = vk->get_field_by_offset(field_offset, false);
1707       }
1708     }
1709     if (flat->isa_klassptr()) {
1710       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1711         alias_type(idx)->set_rewritable(false);
1712       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1713         alias_type(idx)->set_rewritable(false);
1714       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1715         alias_type(idx)->set_rewritable(false);
1716       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1717         alias_type(idx)->set_rewritable(false);
1718       if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1719         alias_type(idx)->set_rewritable(false);
1720       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1721         alias_type(idx)->set_rewritable(false);
1722     }
1723     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1724     // but the base pointer type is not distinctive enough to identify
1725     // references into JavaThread.)
1726 
1727     // Check for final fields.
1728     const TypeInstPtr* tinst = flat->isa_instptr();
1729     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {

1730       if (tinst->const_oop() != NULL &&
1731           tinst->klass() == ciEnv::current()->Class_klass() &&
1732           tinst->offset() >= (tinst->klass()->as_instance_klass()->layout_helper_size_in_bytes())) {
1733         // static field
1734         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1735         field = k->get_field_by_offset(tinst->offset(), true);
1736       } else if (tinst->klass()->is_inlinetype()) {
1737         // Inline type field
1738         ciInlineKlass* vk = tinst->inline_klass();
1739         field = vk->get_field_by_offset(tinst->offset(), false);
1740       } else {
1741         ciInstanceKlass* k = tinst->klass()->as_instance_klass();
1742         field = k->get_field_by_offset(tinst->offset(), false);
1743       }
1744     }
1745     assert(field == NULL ||
1746            original_field == NULL ||
1747            (field->holder() == original_field->holder() &&
1748             field->offset() == original_field->offset() &&
1749             field->is_static() == original_field->is_static()), "wrong field?");
1750     // Set field() and is_rewritable() attributes.
1751     if (field != NULL) {
1752       alias_type(idx)->set_field(field);
1753       if (flat->isa_aryptr()) {
1754         // Fields of flat arrays are rewritable although they are declared final
1755         assert(flat->is_aryptr()->is_flat(), "must be a flat array");
1756         alias_type(idx)->set_rewritable(true);
1757       }
1758     }
1759   }
1760 
1761   // Fill the cache for next time.
1762   if (!uncached) {
1763     ace->_adr_type = adr_type;
1764     ace->_index    = idx;
1765     assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
1766 
1767     // Might as well try to fill the cache for the flattened version, too.
1768     AliasCacheEntry* face = probe_alias_cache(flat);
1769     if (face->_adr_type == NULL) {
1770       face->_adr_type = flat;
1771       face->_index    = idx;
1772       assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1773     }
1774   }
1775 
1776   return alias_type(idx);
1777 }
1778 
1779 
1780 Compile::AliasType* Compile::alias_type(ciField* field) {
1781   const TypeOopPtr* t;
1782   if (field->is_static())
1783     t = TypeInstPtr::make(field->holder()->java_mirror());
1784   else
1785     t = TypeOopPtr::make_from_klass_raw(field->holder());
1786   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1787   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1788   return atp;
1789 }
1790 
1791 
1792 //------------------------------have_alias_type--------------------------------
1793 bool Compile::have_alias_type(const TypePtr* adr_type) {

1870   C->set_post_loop_opts_phase(); // no more loop opts allowed
1871 
1872   assert(!C->major_progress(), "not cleared");
1873 
1874   if (_for_post_loop_igvn.length() > 0) {
1875     while (_for_post_loop_igvn.length() > 0) {
1876       Node* n = _for_post_loop_igvn.pop();
1877       n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1878       igvn._worklist.push(n);
1879     }
1880     igvn.optimize();
1881     assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1882 
1883     // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1884     if (C->major_progress()) {
1885       C->clear_major_progress(); // ensure that major progress is now clear
1886     }
1887   }
1888 }
1889 
1890 void Compile::add_inline_type(Node* n) {
1891   assert(n->is_InlineTypeBase(), "unexpected node");
1892   _inline_type_nodes.push(n);
1893 }
1894 
1895 void Compile::remove_inline_type(Node* n) {
1896   assert(n->is_InlineTypeBase(), "unexpected node");
1897   if (_inline_type_nodes.contains(n)) {
1898     _inline_type_nodes.remove(n);
1899   }
1900 }
1901 
1902 // Does the return value keep otherwise useless inline type allocations alive?
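     // Walks up through InlineTypePtr, Phi and CheckCastPP chains from the return
     // value; answers true only if at least one buffer allocation is reached and
     // no node on the chain has any other use.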
1903 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1904   ResourceMark rm;
1905   Unique_Node_List wq;
1906   wq.push(ret_val);
1907   bool some_allocations = false;
1908   for (uint i = 0; i < wq.size(); i++) {
1909     Node* n = wq.at(i);
1910     assert(!n->is_InlineType(), "chain of inline type nodes");
1911     if (n->outcnt() > 1) {
1912       // Some other use for the allocation
1913       return false;
1914     } else if (n->is_InlineTypePtr()) {
1915       wq.push(n->in(1));
1916     } else if (n->is_Phi()) {
1917       for (uint j = 1; j < n->req(); j++) {
1918         wq.push(n->in(j));
1919       }
1920     } else if (n->is_CheckCastPP() &&
1921                n->in(1)->is_Proj() &&
1922                n->in(1)->in(0)->is_Allocate()) {
1923       some_allocations = true;
1924     } else if (n->is_CheckCastPP()) {
1925       wq.push(n->in(1));
1926     }
1927   }
1928   return some_allocations;
1929 }
1930 
1931 void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
1932   // Make sure that the return value does not keep an otherwise unused allocation alive
1933   if (tf()->returns_inline_type_as_fields()) {
1934     Node* ret = NULL;
1935     for (uint i = 1; i < root()->req(); i++) {
1936       Node* in = root()->in(i);
1937       if (in->Opcode() == Op_Return) {
1938         assert(ret == NULL, "only one return");
1939         ret = in;
1940       }
1941     }
1942     if (ret != NULL) {
1943       Node* ret_val = ret->in(TypeFunc::Parms);
1944       if (igvn.type(ret_val)->isa_oopptr() &&
1945           return_val_keeps_allocations_alive(ret_val)) {
1946         igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
1947         assert(ret_val->outcnt() == 0, "should be dead now");
1948         igvn.remove_dead_node(ret_val);
1949       }
1950     }
1951   }
1952   if (_inline_type_nodes.length() == 0) {
1953     return;
1954   }
1955   // Scalarize inline types in safepoint debug info.
1956   // Delay this until all inlining is over to avoid getting inconsistent debug info.
1957   set_scalarize_in_safepoints(true);
1958   for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
1959     _inline_type_nodes.at(i)->as_InlineTypeBase()->make_scalar_in_safepoints(&igvn);
1960   }
1961   if (remove) {
1962     // Remove inline type nodes
1963     while (_inline_type_nodes.length() > 0) {
1964       InlineTypeBaseNode* vt = _inline_type_nodes.pop()->as_InlineTypeBase();
1965       if (vt->outcnt() == 0) {
1966         igvn.remove_dead_node(vt);
1967       } else if (vt->is_InlineTypePtr()) {
1968         igvn.replace_node(vt, vt->get_oop());
1969       } else {
1970         // Check if any users are blackholes. If so, rewrite them to use either the
1971         // allocated buffer or the individual components instead of the inline type
1972         // node that goes away.
1973         for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
1974           if (vt->out(i)->is_Blackhole()) {
1975             BlackholeNode* bh = vt->out(i)->as_Blackhole();
1976 
1977             // Unlink the old input
1978             int idx = bh->find_edge(vt);
1979             assert(idx != -1, "The edge should be there");
1980             bh->del_req(idx);
1981             --i;
1982 
1983             if (vt->is_allocated(&igvn)) {
1984               // Already has the allocated instance, blackhole that
1985               bh->add_req(vt->get_oop());
1986             } else {
1987               // Not allocated yet, blackhole the components
1988               for (uint c = 0; c < vt->field_count(); c++) {
1989                 bh->add_req(vt->field_value(c));
1990               }
1991             }
1992 
1993             // Node modified, record for IGVN
1994             igvn.record_for_igvn(bh);
1995           }
1996         }
1997 
1998 #ifdef ASSERT
1999         for (DUIterator_Fast imax, i = vt->fast_outs(imax); i < imax; i++) {
2000           assert(vt->fast_out(i)->is_InlineTypeBase(), "Unexpected inline type user");
2001         }
2002 #endif
2003         igvn.replace_node(vt, igvn.C->top());
2004       }
2005     }
2006   }
2007   igvn.optimize();
2008 }
2009 
2010 void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
2011   if (!_has_flattened_accesses) {
2012     return;
2013   }
2014   // Initially, all flattened array accesses share the same slice to
2015   // keep dependencies with Object[] array accesses (which could refer
2016   // to a flattened array) correct. Parsing is done, so we
2017   // now know all flattened array accesses in this compilation
2018   // unit. Let's move flattened array accesses to their own slices,
2019   // one per element field. This should help memory access
2020   // optimizations.
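       // For instance, for a flat array of a two-field inline type, accesses to
       // the two fields end up in two distinct slices after this pass (the
       // two-field shape is only an illustration).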
2021   ResourceMark rm;
2022   Unique_Node_List wq;
2023   wq.push(root());
2024 
2025   Node_List mergememnodes;
2026   Node_List memnodes;
2027 
2028   // Alias index currently shared by all flattened memory accesses
2029   int index = get_alias_index(TypeAryPtr::INLINES);
2030 
2031   // Find MergeMem nodes and flattened array accesses
2032   for (uint i = 0; i < wq.size(); i++) {
2033     Node* n = wq.at(i);
2034     if (n->is_Mem()) {
2035       const TypePtr* adr_type = NULL;
2036       if (n->Opcode() == Op_StoreCM) {
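           // Classify a card mark by the slice of the oop store it covers
           // rather than by the StoreCM's own slice.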
2037         adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
2038       } else {
2039         adr_type = get_adr_type(get_alias_index(n->adr_type()));
2040       }
2041       if (adr_type == TypeAryPtr::INLINES) {
2042         memnodes.push(n);
2043       }
2044     } else if (n->is_MergeMem()) {
2045       MergeMemNode* mm = n->as_MergeMem();
2046       if (mm->memory_at(index) != mm->base_memory()) {
2047         mergememnodes.push(n);
2048       }
2049     }
2050     for (uint j = 0; j < n->req(); j++) {
2051       Node* m = n->in(j);
2052       if (m != NULL) {
2053         wq.push(m);
2054       }
2055     }
2056   }
2057 
2058   if (memnodes.size() > 0) {
2059     _flattened_accesses_share_alias = false;
2060 
2061     // We are going to change the slice for the flattened array
2062     // accesses so we need to clear the cache entries that refer to
2063     // them.
2064     for (uint i = 0; i < AliasCacheSize; i++) {
2065       AliasCacheEntry* ace = &_alias_cache[i];
2066       if (ace->_adr_type != NULL &&
2067           ace->_adr_type->isa_aryptr() &&
2068           ace->_adr_type->is_aryptr()->is_flat()) {
2069         ace->_adr_type = NULL;
2070         ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the NULL adr_type resolves to AliasIdxTop
2071       }
2072     }
2073 
2074     // Find what aliases we are going to add
2075     int start_alias = num_alias_types()-1;
2076     int stop_alias = 0;
2077 
2078     for (uint i = 0; i < memnodes.size(); i++) {
2079       Node* m = memnodes.at(i);
2080       const TypePtr* adr_type = NULL;
2081       if (m->Opcode() == Op_StoreCM) {
2082         adr_type = m->in(MemNode::OopStore)->adr_type();
2083         if (adr_type != TypeAryPtr::INLINES) {
2084           // store was optimized out and we lost track of the adr_type
2085           Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
2086                                         m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
2087                                         get_alias_index(adr_type));
2088           igvn.register_new_node_with_optimizer(clone);
2089           igvn.replace_node(m, clone);
2090         }
2091       } else {
2092         adr_type = m->adr_type();
2093 #ifdef ASSERT
2094         m->as_Mem()->set_adr_type(adr_type);
2095 #endif
2096       }
2097       int idx = get_alias_index(adr_type);
2098       start_alias = MIN2(start_alias, idx);
2099       stop_alias = MAX2(stop_alias, idx);
2100     }
2101 
2102     assert(stop_alias >= start_alias, "should have expanded aliases");
2103 
2104     Node_Stack stack(0);
2105 #ifdef ASSERT
2106     VectorSet seen(Thread::current()->resource_area());
2107 #endif
2108     // Now let's fix the memory graph so each flattened array access
2109     // is moved to the right slice. Start from the MergeMem nodes.
2110     uint last = unique();
2111     for (uint i = 0; i < mergememnodes.size(); i++) {
2112       MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
2113       Node* n = current->memory_at(index);
2114       MergeMemNode* mm = NULL;
2115       do {
2116         // Follow memory edges through memory accesses, phis and
2117         // narrow membars and push nodes on the stack. Once we hit
2118         // bottom memory, we pop elements off the stack one at a
2119         // time, in reverse order, and move them to the right slice
2120         // by changing their memory edges.
2121         if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
2122           assert(!seen.test_set(n->_idx), "");
2123           // Uses (a load for instance) will need to be moved to the
2124           // right slice as well and will get a new memory state
2125           // that we don't know yet. The use could also be the
2126           // backedge of a loop. We put a place holder node between
2127           // the memory node and its uses. We replace that place
2128           // holder with the correct memory state once we know it,
2129           // i.e. when nodes are popped off the stack. Using the
2130         // place holder makes the logic work in the presence of
2131           // loops.
2132           if (n->outcnt() > 1) {
2133             Node* place_holder = NULL;
2134             assert(!n->has_out_with(Op_Node), "");
2135             for (DUIterator k = n->outs(); n->has_out(k); k++) {
2136               Node* u = n->out(k);
2137               if (u != current && u->_idx < last) {
2138                 bool success = false;
2139                 for (uint l = 0; l < u->req(); l++) {
2140                   if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
2141                     continue;
2142                   }
2143                   Node* in = u->in(l);
2144                   if (in == n) {
2145                     if (place_holder == NULL) {
2146                       place_holder = new Node(1);
2147                       place_holder->init_req(0, n);
2148                     }
2149                     igvn.replace_input_of(u, l, place_holder);
2150                     success = true;
2151                   }
2152                 }
2153                 if (success) {
2154                   --k;
2155                 }
2156               }
2157             }
2158           }
2159           if (n->is_Phi()) {
2160             stack.push(n, 1);
2161             n = n->in(1);
2162           } else if (n->is_Mem()) {
2163             stack.push(n, n->req());
2164             n = n->in(MemNode::Memory);
2165           } else {
2166             assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
2167             stack.push(n, n->req());
2168             n = n->in(0)->in(TypeFunc::Memory);
2169           }
2170         } else {
2171           assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
2172           // Build a new MergeMem node to carry the new memory state
2173           // as we build it. IGVN should fold extraneous MergeMem
2174           // nodes.
2175           mm = MergeMemNode::make(n);
2176           igvn.register_new_node_with_optimizer(mm);
2177           while (stack.size() > 0) {
2178             Node* m = stack.node();
2179             uint idx = stack.index();
2180             if (m->is_Mem()) {
2181               // Move memory node to its new slice
2182               const TypePtr* adr_type = m->adr_type();
2183               int alias = get_alias_index(adr_type);
2184               Node* prev = mm->memory_at(alias);
2185               igvn.replace_input_of(m, MemNode::Memory, prev);
2186               mm->set_memory_at(alias, m);
2187             } else if (m->is_Phi()) {
2188               // We need as many new phis as there are new aliases
2189               igvn.replace_input_of(m, idx, mm);
2190               if (idx == m->req()-1) {
2191                 Node* r = m->in(0);
2192                 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2193                   const Type* adr_type = get_adr_type(j);
2194                   if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
2195                     continue;
2196                   }
2197                   Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
2198                   igvn.register_new_node_with_optimizer(phi);
2199                   for (uint k = 1; k < m->req(); k++) {
2200                     phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
2201                   }
2202                   mm->set_memory_at(j, phi);
2203                 }
2204                 Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2205                 igvn.register_new_node_with_optimizer(base_phi);
2206                 for (uint k = 1; k < m->req(); k++) {
2207                   base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
2208                 }
2209                 mm->set_base_memory(base_phi);
2210               }
2211             } else {
2212               // This is a MemBarCPUOrder node from
2213               // Parse::array_load()/Parse::array_store(), in the
2214               // branch that handles flattened arrays hidden under
2215               // an Object[] array. We also need one new membar per
2216               // new alias to keep the unknown access that the
2217               // membars protect properly ordered with accesses to
2218             // known flattened arrays.
2219               assert(m->is_Proj(), "projection expected");
2220               Node* ctrl = m->in(0)->in(TypeFunc::Control);
2221               igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
2222               for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2223                 const Type* adr_type = get_adr_type(j);
2224                 if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
2225                   continue;
2226                 }
2227                 MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
2228                 igvn.register_new_node_with_optimizer(mb);
2229                 Node* mem = mm->memory_at(j);
2230                 mb->init_req(TypeFunc::Control, ctrl);
2231                 mb->init_req(TypeFunc::Memory, mem);
2232                 ctrl = new ProjNode(mb, TypeFunc::Control);
2233                 igvn.register_new_node_with_optimizer(ctrl);
2234                 mem = new ProjNode(mb, TypeFunc::Memory);
2235                 igvn.register_new_node_with_optimizer(mem);
2236                 mm->set_memory_at(j, mem);
2237               }
2238               igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
2239             }
2240             if (idx < m->req()-1) {
2241               idx += 1;
2242               stack.set_index(idx);
2243               n = m->in(idx);
2244               break;
2245             }
2246             // Take care of place holder nodes
2247             if (m->has_out_with(Op_Node)) {
2248               Node* place_holder = m->find_out_with(Op_Node);
2249               if (place_holder != NULL) {
2250                 Node* mm_clone = mm->clone();
2251                 igvn.register_new_node_with_optimizer(mm_clone);
2252                 Node* hook = new Node(1);
2253                 hook->init_req(0, mm);
2254                 igvn.replace_node(place_holder, mm_clone);
2255                 hook->destruct(&igvn);
2256               }
2257               assert(!m->has_out_with(Op_Node), "place holder should be gone now");
2258             }
2259             stack.pop();
2260           }
2261         }
2262       } while(stack.size() > 0);
2263       // Fix the memory state at the MergeMem we started from
2264       igvn.rehash_node_delayed(current);
2265       for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2266         const Type* adr_type = get_adr_type(j);
2267         if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
2268           continue;
2269         }
2270         current->set_memory_at(j, mm);
2271       }
2272       current->set_memory_at(index, current->base_memory());
2273     }
2274     igvn.optimize();
2275   }
2276   print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
2277 #ifdef ASSERT
2278   if (!_flattened_accesses_share_alias) {
2279     wq.clear();
2280     wq.push(root());
2281     for (uint i = 0; i < wq.size(); i++) {
2282       Node* n = wq.at(i);
2283       assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
2284       for (uint j = 0; j < n->req(); j++) {
2285         Node* m = n->in(j);
2286         if (m != NULL) {
2287           wq.push(m);
2288         }
2289       }
2290     }
2291   }
2292 #endif
2293 }
2294 
2295 
2296 // StringOpts and late inlining of string methods
2297 void Compile::inline_string_calls(bool parse_time) {
2298   {
2299     // remove useless nodes to make the usage analysis simpler
2300     ResourceMark rm;
2301     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2302   }
2303 
2304   {
2305     ResourceMark rm;
2306     print_method(PHASE_BEFORE_STRINGOPTS, 3);
2307     PhaseStringOpts pso(initial_gvn(), for_igvn());
2308     print_method(PHASE_AFTER_STRINGOPTS, 3);
2309   }
2310 
2311   // now inline anything that we skipped the first time around
2312   if (!parse_time) {
2313     _late_inlines_pos = _late_inlines.length();
2314   }
2315 

2465     assert(has_stringbuilder(), "inconsistent");
2466     for_igvn()->clear();
2467     initial_gvn()->replace_with(&igvn);
2468 
2469     inline_string_calls(false);
2470 
2471     if (failing())  return;
2472 
2473     inline_incrementally_cleanup(igvn);
2474   }
2475 
2476   set_inlining_incrementally(false);
2477 }
2478 
2479 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2480   // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2481   // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2482   // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
2483   // as if "inlining_incrementally() == true" were set.
2484   assert(inlining_incrementally() == false, "not allowed");
2485 #ifdef ASSERT
2486   Unique_Node_List* modified_nodes = _modified_nodes;
2487   _modified_nodes = NULL;
2488 #endif
2489   assert(_late_inlines.length() > 0, "sanity");
2490 
2491   while (_late_inlines.length() > 0) {
2492     for_igvn()->clear();
2493     initial_gvn()->replace_with(&igvn);
2494 
2495     while (inline_incrementally_one()) {
2496       assert(!failing(), "inconsistent");
2497     }
2498     if (failing())  return;
2499 
2500     inline_incrementally_cleanup(igvn);
2501   }
2502   DEBUG_ONLY( _modified_nodes = modified_nodes; )
2503 }
2504 
2505 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2506   if (_loop_opts_cnt > 0) {
2507     debug_only( int cnt = 0; );
2508     while (major_progress() && (_loop_opts_cnt > 0)) {
2509       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2510       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2511       PhaseIdealLoop::optimize(igvn, mode);
2512       _loop_opts_cnt--;
2513       if (failing())  return false;
2514       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2515     }
2516   }
2517   return true;
2518 }
2519 
2520 // Remove edges from "root" to each SafePoint at a backward branch.
2521 // They were inserted during parsing (see add_safepoint()) to make
2522 // infinite loops without calls or exceptions visible to root, i.e.,

2626     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2627     initial_gvn()->replace_with(&igvn);
2628     Unique_Node_List* old_worklist = for_igvn();
2629     old_worklist->clear();
2630     Unique_Node_List new_worklist(C->comp_arena());
2631     {
2632       ResourceMark rm;
2633       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2634     }
2635     Unique_Node_List* save_for_igvn = for_igvn();
2636     set_for_igvn(&new_worklist);
2637     igvn = PhaseIterGVN(initial_gvn());
2638     igvn.optimize();
2639     set_for_igvn(old_worklist); // new_worklist is dead beyond this point
2640   }
2641 
2642   // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2643   // safepoints
2644   remove_root_to_sfpts_edges(igvn);
2645 
2646   // Process inline type nodes now that all inlining is over
2647   process_inline_types(igvn);
2648 
2649   adjust_flattened_array_access_aliases(igvn);
2650 
2651   // Perform escape analysis
2652   if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2653     if (has_loops()) {
2654       // Cleanup graph (remove dead nodes).
2655       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2656       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2657       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2658       if (failing())  return;
2659     }
2660     bool progress;
2661     do {
2662       ConnectionGraph::do_analysis(this, &igvn);
2663 
2664       if (failing())  return;
2665 
2666       int mcount = macro_count(); // Record number of allocations and locks before IGVN
2667 
2668       // Optimize out fields loads from scalar replaceable allocations.
2669       igvn.optimize();
2670       print_method(PHASE_ITER_GVN_AFTER_EA, 2);

2744   print_method(PHASE_ITER_GVN2, 2);
2745 
2746   if (failing())  return;
2747 
2748   // Loop transforms on the ideal graph.  Range Check Elimination,
2749   // peeling, unrolling, etc.
2750   if (!optimize_loops(igvn, LoopOptsDefault)) {
2751     return;
2752   }
2753 
2754   if (failing())  return;
2755 
2756   C->clear_major_progress(); // ensure that major progress is now clear
2757 
2758   process_for_post_loop_opts_igvn(igvn);
2759 
2760 #ifdef ASSERT
2761   bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2762 #endif
2763 
2764   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2765 
2766   if (_late_inlines.length() > 0) {
2767     // More opportunities to optimize virtual and MH calls.
2768     // Though it may be too late to perform inlining, strength-reducing them to direct calls is still an option.
2769     process_late_inline_calls_no_inline(igvn);
2770   }
2771 
2772   {
2773     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2774     PhaseMacroExpand  mex(igvn);
2775     if (mex.expand_macro_nodes()) {
2776       assert(failing(), "must bail out w/ explicit message");
2777       return;
2778     }
2779     print_method(PHASE_MACRO_EXPANSION, 2);
2780   }
2781 
2782   // Process inline type nodes again and remove them. From here
2783   // on we don't need to keep track of field values anymore.
2784   process_inline_types(igvn, /* remove= */ true);
2785 
2786   {
2787     TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2788     if (bs->expand_barriers(this, igvn)) {
2789       assert(failing(), "must bail out w/ explicit message");
2790       return;
2791     }
2792     print_method(PHASE_BARRIER_EXPANSION, 2);
2793   }
2794 
2795   if (C->max_vector_size() > 0) {
2796     C->optimize_logic_cones(igvn);
2797     igvn.optimize();
2798   }
2799 
2800   DEBUG_ONLY( _modified_nodes = NULL; )
2801   DEBUG_ONLY( _late_inlines.clear(); )
2802 
2803   assert(igvn._worklist.size() == 0, "not empty");








2804  } // (End scope of igvn; run destructor if necessary for asserts.)
2805 
2806  check_no_dead_use();
2807 
2808  process_print_inlining();
2809 
2810  // A method with only infinite loops has no edges entering loops from root
2811  {
2812    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2813    if (final_graph_reshaping()) {
2814      assert(failing(), "must bail out w/ explicit message");
2815      return;
2816    }
2817  }
2818 
2819  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2820  DEBUG_ONLY(set_phase_optimize_finished();)
2821 }
2822 
2823 #ifdef ASSERT

3406             // Accumulate any precedence edges
3407             if (mem->in(i) != NULL) {
3408               n->add_prec(mem->in(i));
3409             }
3410           }
3411           // Everything above this point has been processed.
3412           done = true;
3413         }
3414         // Eliminate the previous StoreCM
3415         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3416         assert(mem->outcnt() == 0, "should be dead");
3417         mem->disconnect_inputs(this);
3418       } else {
3419         prev = mem;
3420       }
3421       mem = prev->in(MemNode::Memory);
3422     }
3423   }
3424 }
3425 
3426 
3427 //------------------------------final_graph_reshaping_impl----------------------
3428 // Implement items 1-5 from final_graph_reshaping below.
3429 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
3430 
3431   if ( n->outcnt() == 0 ) return; // dead node
3432   uint nop = n->Opcode();
3433 
3434   // Check for 2-input instruction with "last use" on right input.
3435   // Swap to left input.  Implements item (2).
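       // (Likely rationale: keeping the last use on the left favors two-address,
       // update-in-place encodings and lets the register allocator reuse that
       // input's register for the result.)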
3436   if( n->req() == 3 &&          // two-input instruction
3437       n->in(1)->outcnt() > 1 && // left use is NOT a last use
3438       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
3439       n->in(2)->outcnt() == 1 &&// right use IS a last use
3440       !n->in(2)->is_Con() ) {   // right use is not a constant
3441     // Check for commutative opcode
3442     switch( nop ) {
3443     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
3444     case Op_MaxI:  case Op_MaxL:  case Op_MaxF:  case Op_MaxD:
3445     case Op_MinI:  case Op_MinL:  case Op_MinF:  case Op_MinD:
3446     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:

3561       if (n->outcnt() > 1 &&
3562           !n->is_Proj() &&
3563           nop != Op_CreateEx &&
3564           nop != Op_CheckCastPP &&
3565           nop != Op_DecodeN &&
3566           nop != Op_DecodeNKlass &&
3567           !n->is_Mem() &&
3568           !n->is_Phi()) {
3569         Node *x = n->clone();
3570         call->set_req(TypeFunc::Parms, x);
3571       }
3572     }
3573     break;
3574   }
3575 
3576   case Op_StoreCM:
3577     {
3578       // Convert OopStore dependence into precedence edge
3579       Node* prec = n->in(MemNode::OopStore);
3580       n->del_req(MemNode::OopStore);
3581       if (prec->is_MergeMem()) {
3582         MergeMemNode* mm = prec->as_MergeMem();
3583         Node* base = mm->base_memory();
3584         for (int i = AliasIdxRaw + 1; i < num_alias_types(); i++) {
3585           const Type* adr_type = get_adr_type(i);
3586           if (adr_type->isa_aryptr() && adr_type->is_aryptr()->is_flat()) {
3587             Node* m = mm->memory_at(i);
3588             n->add_prec(m);
3589           }
3590         }
3591         if (mm->outcnt() == 0) {
3592           mm->disconnect_inputs(this);
3593         }
3594       } else {
3595         n->add_prec(prec);
3596       }
3597       eliminate_redundant_card_marks(n);
3598     }
3599 
3600     // fall through
3601 
3602   case Op_StoreB:
3603   case Op_StoreC:
3604   case Op_StorePConditional:
3605   case Op_StoreI:
3606   case Op_StoreL:
3607   case Op_StoreIConditional:
3608   case Op_StoreLConditional:
3609   case Op_CompareAndSwapB:
3610   case Op_CompareAndSwapS:
3611   case Op_CompareAndSwapI:
3612   case Op_CompareAndSwapL:
3613   case Op_CompareAndSwapP:
3614   case Op_CompareAndSwapN:
3615   case Op_WeakCompareAndSwapB:
3616   case Op_WeakCompareAndSwapS:

4150           // Replace all nodes whose edges are identical to m's with m
4151           k->subsume_by(m, this);
4152         }
4153       }
4154     }
4155     break;
4156   }
4157   case Op_CmpUL: {
4158     if (!Matcher::has_match_rule(Op_CmpUL)) {
4159       // No support for unsigned long comparisons
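           // The rewrite below computes a' = (a | (a >> 63)) & max_jlong, mapping any value
           // with the sign bit set to max_jlong, and then compares a' against b with a signed
           // CmpL (presumably sufficient for the non-negative right-hand operands these
           // nodes are created with).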
4160       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
4161       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
4162       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
4163       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
4164       Node* andl = new AndLNode(orl, remove_sign_mask);
4165       Node* cmp = new CmpLNode(andl, n->in(2));
4166       n->subsume_by(cmp, this);
4167     }
4168     break;
4169   }
4170 #ifdef ASSERT
4171   case Op_InlineTypePtr:
4172   case Op_InlineType: {
4173     n->dump(-1);
4174     assert(false, "inline type node was not removed");
4175     break;
4176   }
4177 #endif
4178   default:
4179     assert(!n->is_Call(), "");
4180     assert(!n->is_Mem(), "");
4181     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
4182     break;
4183   }
4184 }
4185 
4186 //------------------------------final_graph_reshaping_walk---------------------
4187 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
4188 // requires that the walk visits a node's inputs before visiting the node.
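     // (Hence the explicit Node_Stack DFS below: a node is handed to
     // final_graph_reshaping_impl() only after all of its inputs have been visited,
     // making the walk effectively post-order.)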
4189 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
4190   Unique_Node_List sfpt;
4191 
4192   frc._visited.set(root->_idx); // first, mark node as visited
4193   uint cnt = root->req();
4194   Node *n = root;
4195   uint  i = 0;
4196   while (true) {
4197     if (i < cnt) {

4516   }
4517 }
4518 
4519 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4520   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4521 }
4522 
4523 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4524   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4525 }
4526 
4527 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4528   if (holder->is_initialized()) {
4529     return false;
4530   }
4531   if (holder->is_being_initialized()) {
4532     if (accessing_method->holder() == holder) {
4533       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4534       // <init>, or a static method. In all those cases, an initialization barrier for
4535       // the holder klass has already been passed.
4536       if (accessing_method->is_class_initializer() ||
4537           accessing_method->is_object_constructor() ||
4538           accessing_method->is_static()) {
4539         return false;
4540       }
4541     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4542       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4543       // In case of <init> or a static method, a barrier on the subclass alone is not enough:
4544       // the child class can become fully initialized while its parent class is still being initialized.
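           // (For example, if the parent's <clinit> itself triggers initialization of the
           // child, the child can finish initializing and its static methods can run while
           // the parent is still in <clinit>, so the barrier must stay.)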
4545       if (accessing_method->is_class_initializer()) {
4546         return false;
4547       }
4548     }
4549     ciMethod* root = method(); // the root method of compilation
4550     if (root != accessing_method) {
4551       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4552     }
4553   }
4554   return true;
4555 }
4556 
4557 #ifndef PRODUCT
4558 //------------------------------verify_graph_edges---------------------------
4559 // Walk the Graph and verify that there is a one-to-one correspondence
4560 // between Use-Def edges and Def-Use edges in the graph.
4561 void Compile::verify_graph_edges(bool no_dead_code) {
4562   if (VerifyGraphEdges) {
4563     Unique_Node_List visited;
4564     // Call recursive graph walk to check edges
4565     _root->verify_edges(visited);

4646                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
4647   }
4648 
4649   if (VerifyIdealNodeCount) {
4650     Compile::current()->print_missing_nodes();
4651   }
4652 #endif
4653 
4654   if (_log != NULL) {
4655     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4656   }
4657 }
4658 
4659 //----------------------------static_subtype_check-----------------------------
4660 // Shortcut important common cases when superklass is exact:
4661 // (0) superklass is java.lang.Object (can occur in reflective code)
4662 // (1) subklass is already limited to a subtype of superklass => always ok
4663 // (2) subklass does not overlap with superklass => always fail
4664 // (3) superklass has NO subtypes and we can check with a simple compare.
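     // For instance, when superklass is a final instance klass, case (3) applies and the
     // caller can emit a single klass-pointer compare instead of the full subtype test.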
4665 int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
4666   if (StressReflectiveCode || superk == NULL || subk == NULL) {
4667     return SSC_full_test;       // Let caller generate the general case.
4668   }
4669 
4670   if (superk == env()->Object_klass()) {
4671     return SSC_always_true;     // (0) this test cannot fail
4672   }
4673 
4674   ciType* superelem = superk;
4675   ciType* subelem = subk;
4676   if (superelem->is_array_klass()) {
4677     superelem = superelem->as_array_klass()->base_element_type();
4678   }
4679   if (subelem->is_array_klass()) {
4680     subelem = subelem->as_array_klass()->base_element_type();
4681   }
4682 
4683   if (!subk->is_interface()) {  // cannot trust static interface types yet
4684     if (subk->is_subtype_of(superk)) {
4685       return SSC_always_true;   // (1) false path dead; no dynamic test needed
4686     }
4687     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
4688         !(subelem->is_klass() && subelem->as_klass()->is_interface()) &&
4689         !superk->is_subtype_of(subk)) {
4690       return SSC_always_false;  // (2) true path dead; no dynamic test needed
4691     }
4692   }
4693 
4694   // Do not fold the subtype check to an array klass pointer comparison for [V? arrays.
4695   // [QMyValue is a subtype of [LMyValue but the klass for [QMyValue is not equal to
4696   // the klass for [LMyValue. Perform a full test.
4697   if (superk->is_obj_array_klass() && !superk->as_array_klass()->is_elem_null_free() &&
4698       superk->as_array_klass()->element_klass()->is_inlinetype()) {
4699     return SSC_full_test;
4700   }
4701   // If casting to an instance klass, it must have no subtypes
4702   if (superk->is_interface()) {
4703     // Cannot trust interfaces yet.
4704     // %%% S.B. superk->nof_implementors() == 1
4705   } else if (superelem->is_instance_klass()) {
4706     ciInstanceKlass* ik = superelem->as_instance_klass();
4707     if (!ik->has_subklass() && !ik->is_interface()) {
4708       if (!ik->is_final()) {
4709         // Add a dependency if there is a chance of a later subclass.
4710         dependencies()->assert_leaf_type(ik);
4711       }
4712       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
4713     }
4714   } else {
4715     // A primitive array type has no subtypes.
4716     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
4717   }
4718 
4719   return SSC_full_test;
4720 }

5212       const Type* t = igvn.type_or_null(n);
5213       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
5214       if (n->is_Type()) {
5215         t = n->as_Type()->type();
5216         assert(t == t->remove_speculative(), "no more speculative types");
5217       }
5218       // Iterate over outs - endless loops are unreachable from below
5219       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5220         Node *m = n->fast_out(i);
5221         if (not_a_node(m)) {
5222           continue;
5223         }
5224         worklist.push(m);
5225       }
5226     }
5227     igvn.check_no_speculative_types();
5228 #endif
5229   }
5230 }
5231 
5232 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
5233   const TypeInstPtr* ta = phase->type(a)->isa_instptr();
5234   const TypeInstPtr* tb = phase->type(b)->isa_instptr();
5235   if (!EnableValhalla || ta == NULL || tb == NULL ||
5236       ta->is_zero_type() || tb->is_zero_type() ||
5237       !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
5238     // Use old acmp if one operand is null or not an inline type
5239     return new CmpPNode(a, b);
5240   } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
5241     // We know that one operand is an inline type. Therefore,
5242     // new acmp will only return true if both operands are NULL.
5243     // Check if both operands are null by or'ing the oops.
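         // (CastP2X exposes the oops as raw machine words; their OR is zero iff both are
         // NULL, so a single CmpX against zero implements the "both null" test.)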
5244     a = phase->transform(new CastP2XNode(NULL, a));
5245     b = phase->transform(new CastP2XNode(NULL, b));
5246     a = phase->transform(new OrXNode(a, b));
5247     return new CmpXNode(a, phase->MakeConX(0));
5248   }
5249   // Use new acmp
5250   return NULL;
5251 }
5252 
5253 // Auxiliary methods to support randomized stressing/fuzzing.
5254 
5255 int Compile::random() {
5256   _stress_seed = os::next_random(_stress_seed);
5257   return static_cast<int>(_stress_seed);
5258 }
5259 
5260 // This method can be called an arbitrary number of times, with the current count
5261 // as the argument. The logic allows selecting a single candidate from the
5262 // running list of candidates as follows:
5263 //    int count = 0;
5264 //    Cand* selected = null;
5265 //    while(cand = cand->next()) {
5266 //      if (randomized_select(++count)) {
5267 //        selected = cand;
5268 //      }
5269 //    }
5270 //
5271 // Including count equalizes the chances any candidate is "selected".
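     // (This is reservoir sampling with a reservoir of one: assuming randomized_select(i)
     // accepts with probability 1/i, each of the n candidates seen so far ends up selected
     // with probability 1/n.)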
5272 // This is useful when we don't have the complete list of candidates to choose