src/hotspot/share/opto/compile.cpp

  35 #include "compiler/disassembler.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/c2/barrierSetC2.hpp"
  39 #include "jfr/jfrEvents.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "opto/addnode.hpp"
  42 #include "opto/block.hpp"
  43 #include "opto/c2compiler.hpp"
  44 #include "opto/callGenerator.hpp"
  45 #include "opto/callnode.hpp"
  46 #include "opto/castnode.hpp"
  47 #include "opto/cfgnode.hpp"
  48 #include "opto/chaitin.hpp"
  49 #include "opto/compile.hpp"
  50 #include "opto/connode.hpp"
  51 #include "opto/convertnode.hpp"
  52 #include "opto/divnode.hpp"
  53 #include "opto/escape.hpp"
  54 #include "opto/idealGraphPrinter.hpp"

  55 #include "opto/loopnode.hpp"
  56 #include "opto/machnode.hpp"
  57 #include "opto/macro.hpp"
  58 #include "opto/matcher.hpp"
  59 #include "opto/mathexactnode.hpp"
  60 #include "opto/memnode.hpp"
  61 #include "opto/mulnode.hpp"
  62 #include "opto/narrowptrnode.hpp"
  63 #include "opto/node.hpp"
  64 #include "opto/opcodes.hpp"
  65 #include "opto/output.hpp"
  66 #include "opto/parse.hpp"
  67 #include "opto/phaseX.hpp"
  68 #include "opto/rootnode.hpp"
  69 #include "opto/runtime.hpp"
  70 #include "opto/stringopts.hpp"
  71 #include "opto/type.hpp"
  72 #include "opto/vector.hpp"
  73 #include "opto/vectornode.hpp"
  74 #include "runtime/globals_extension.hpp"

 372   // Constant node that has no out-edges and has only one in-edge from
 373   // root is usually dead. However, sometimes reshaping walk makes
 374   // it reachable by adding use edges. So, we will NOT count Con nodes
 375   // as dead to be conservative about the dead node count at any
 376   // given time.
 377   if (!dead->is_Con()) {
 378     record_dead_node(dead->_idx);
 379   }
 380   if (dead->is_macro()) {
 381     remove_macro_node(dead);
 382   }
 383   if (dead->is_expensive()) {
 384     remove_expensive_node(dead);
 385   }
 386   if (dead->Opcode() == Op_Opaque4) {
 387     remove_skeleton_predicate_opaq(dead);
 388   }
 389   if (dead->for_post_loop_opts_igvn()) {
 390     remove_from_post_loop_opts_igvn(dead);
 391   }
 392   if (dead->is_Call()) {
 393     remove_useless_late_inlines(                &_late_inlines, dead);
 394     remove_useless_late_inlines(         &_string_late_inlines, dead);
 395     remove_useless_late_inlines(         &_boxing_late_inlines, dead);
 396     remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
 397   }
 398   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 399   bs->unregister_potential_barrier_node(dead);
 400 }
 401 
 402 // Disconnect all useless nodes by disconnecting those at the boundary.
 403 void Compile::remove_useless_nodes(Unique_Node_List &useful) {
 404   uint next = 0;
 405   while (next < useful.size()) {
 406     Node *n = useful.at(next++);
 407     if (n->is_SafePoint()) {
 408       // We're done with a parsing phase. Replaced nodes are not valid
 409       // beyond that point.
 410       n->as_SafePoint()->delete_replaced_nodes();
 411     }
 412     // Use raw traversal of out edges since this code removes out edges
 413     int max = n->outcnt();
 414     for (int j = 0; j < max; ++j) {
 415       Node* child = n->raw_out(j);
 416       if (!useful.member(child)) {
 417         assert(!child->is_top() || child != top(),
 418                "If top is cached in Compile object it is in useful list");
 419         // Only need to remove this out-edge to the useless node
 420         n->raw_del_out(j);
 421         --j;
 422         --max;
 423       }
 424     }
 425     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 426       record_for_igvn(n->unique_out());
 427     }
 428   }
 429 
 430   remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
 431   remove_useless_nodes(_predicate_opaqs,    useful); // remove useless predicate opaque nodes
 432   remove_useless_nodes(_skeleton_predicate_opaqs, useful);
 433   remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
 434   remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
 435   remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
 436 
 437   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 438   bs->eliminate_useless_gc_barriers(useful, this);
 439   // clean up the late inline lists
 440   remove_useless_late_inlines(                &_late_inlines, useful);
 441   remove_useless_late_inlines(         &_string_late_inlines, useful);
 442   remove_useless_late_inlines(         &_boxing_late_inlines, useful);
 443   remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
 444   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 445 }
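The function above illustrates a general boundary-disconnect pattern: walk only the useful set and drop any out-edge that points outside it, so useless nodes are detached without ever being visited themselves. Below is a minimal standalone sketch of that pattern, using a hypothetical ToyNode type rather than the C2 Node class; the unordered edge removal mirrors what raw_del_out() does.

    #include <unordered_set>
    #include <vector>

    struct ToyNode {
      std::vector<ToyNode*> out;   // def->use edges, a stand-in for Node::raw_out()
    };

    // Drop every out-edge that leads outside the 'useful' set.
    static void disconnect_at_boundary(const std::vector<ToyNode*>& useful_list,
                                       const std::unordered_set<ToyNode*>& useful) {
      for (ToyNode* n : useful_list) {
        for (size_t j = 0; j < n->out.size(); ) {
          if (useful.count(n->out[j]) == 0) {
            n->out[j] = n->out.back();   // unordered removal: swap in the last edge
            n->out.pop_back();           // stay at j to re-check the swapped-in edge
          } else {
            ++j;
          }
        }
      }
    }
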
 446 
 447 // ============================================================================
 448 //------------------------------CompileWrapper---------------------------------
 449 class CompileWrapper : public StackObj {
 450   Compile *const _compile;
 451  public:
 452   CompileWrapper(Compile* compile);
 453 
 454   ~CompileWrapper();

 561 #ifndef PRODUCT
 562                   _igv_idx(0),
 563                   _trace_opto_output(directive->TraceOptoOutputOption),
 564                   _print_ideal(directive->PrintIdealOption),
 565 #endif
 566                   _has_method_handle_invokes(false),
 567                   _clinit_barrier_on_entry(false),
 568                   _stress_seed(0),
 569                   _comp_arena(mtCompiler),
 570                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 571                   _env(ci_env),
 572                   _directive(directive),
 573                   _log(ci_env->log()),
 574                   _failure_reason(NULL),
 575                   _intrinsics        (comp_arena(), 0, 0, NULL),
 576                   _macro_nodes       (comp_arena(), 8, 0, NULL),
 577                   _predicate_opaqs   (comp_arena(), 8, 0, NULL),
 578                   _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
 579                   _expensive_nodes   (comp_arena(), 8, 0, NULL),
 580                   _for_post_loop_igvn(comp_arena(), 8, 0, NULL),

 581                   _coarsened_locks   (comp_arena(), 8, 0, NULL),
 582                   _congraph(NULL),
 583                   NOT_PRODUCT(_printer(NULL) COMMA)
 584                   _dead_node_list(comp_arena()),
 585                   _dead_node_count(0),
 586                   _node_arena(mtCompiler),
 587                   _old_arena(mtCompiler),
 588                   _mach_constant_base_node(NULL),
 589                   _Compile_types(mtCompiler),
 590                   _initial_gvn(NULL),
 591                   _for_igvn(NULL),
 592                   _late_inlines(comp_arena(), 2, 0, NULL),
 593                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 594                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 595                   _vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
 596                   _late_inlines_pos(0),
 597                   _number_of_mh_late_inlines(0),
 598                   _native_invokers(comp_arena(), 1, 0, NULL),
 599                   _print_inlining_stream(NULL),
 600                   _print_inlining_list(NULL),

 665   // Node list that Iterative GVN will start with
 666   Unique_Node_List for_igvn(comp_arena());
 667   set_for_igvn(&for_igvn);
 668 
 669   // GVN that will be run immediately on new nodes
 670   uint estimated_size = method()->code_size()*4+64;
 671   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 672   PhaseGVN gvn(node_arena(), estimated_size);
 673   set_initial_gvn(&gvn);
 674 
 675   print_inlining_init();
 676   { // Scope for timing the parser
 677     TracePhase tp("parse", &timers[_t_parser]);
 678 
 679     // Put top into the hash table ASAP.
 680     initial_gvn()->transform_no_reclaim(top());
 681 
 682     // Set up tf(), start(), and find a CallGenerator.
 683     CallGenerator* cg = NULL;
 684     if (is_osr_compilation()) {
 685       const TypeTuple *domain = StartOSRNode::osr_domain();
 686       const TypeTuple *range = TypeTuple::make_range(method()->signature());
 687       init_tf(TypeFunc::make(domain, range));
 688       StartNode* s = new StartOSRNode(root(), domain);
 689       initial_gvn()->set_type_bottom(s);
 690       init_start(s);
 691       cg = CallGenerator::for_osr(method(), entry_bci());
 692     } else {
 693       // Normal case.
 694       init_tf(TypeFunc::make(method()));
 695       StartNode* s = new StartNode(root(), tf()->domain());
 696       initial_gvn()->set_type_bottom(s);
 697       init_start(s);
 698       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 699         // With java.lang.ref.Reference.get() we must go through the
 700         // intrinsic - even when get() is the root
 701         // method of the compile - so that, if necessary, the value in
 702         // the referent field of the reference object gets recorded by
 703         // the pre-barrier code.
 704         cg = find_intrinsic(method(), false);
 705       }
 706       if (cg == NULL) {
 707         float past_uses = method()->interpreter_invocation_count();
 708         float expected_uses = past_uses;
 709         cg = CallGenerator::for_inline(method(), expected_uses);
 710       }
 711     }
 712     if (failing())  return;
 713     if (cg == NULL) {
 714       record_method_not_compilable("cannot parse method");
 715       return;

 806     }
 807   }
 808 #endif
 809 
 810 #ifdef ASSERT
 811   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 812   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 813 #endif
 814 
 815   // Dump compilation data to replay it.
 816   if (directive->DumpReplayOption) {
 817     env()->dump_replay_data(_compile_id);
 818   }
 819   if (directive->DumpInlineOption && (ilt() != NULL)) {
 820     env()->dump_inline_data(_compile_id);
 821   }
 822 
 823   // Now that we know the size of all the monitors we can add a fixed slot
 824   // for the original deopt pc.
 825   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
 826   set_fixed_slots(next_slot);
 827 
 828   // Compute when to use implicit null checks. Used by matching trap based
 829   // nodes and NullCheck optimization.
 830   set_allowed_deopt_reasons();
 831 
 832   // Now generate code
 833   Code_Gen();
 834 }
 835 
 836 //------------------------------Compile----------------------------------------
 837 // Compile a runtime stub
 838 Compile::Compile( ciEnv* ci_env,
 839                   TypeFunc_generator generator,
 840                   address stub_function,
 841                   const char *stub_name,
 842                   int is_fancy_jump,
 843                   bool pass_tls,
 844                   bool return_pc,
 845                   DirectiveSet* directive)

 962   // Create Debug Information Recorder to record scopes, oopmaps, etc.
 963   env()->set_oop_recorder(new OopRecorder(env()->arena()));
 964   env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
 965   env()->set_dependencies(new Dependencies(env()));
 966 
 967   _fixed_slots = 0;
 968   set_has_split_ifs(false);
 969   set_has_loops(false); // first approximation
 970   set_has_stringbuilder(false);
 971   set_has_boxed_value(false);
 972   _trap_can_recompile = false;  // no traps emitted yet
 973   _major_progress = true; // start out assuming good things will happen
 974   set_has_unsafe_access(false);
 975   set_max_vector_size(0);
 976   set_clear_upper_avx(false);  //false as default for clear upper bits of ymm registers
 977   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
 978   set_decompile_count(0);
 979 
 980   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
 981   _loop_opts_cnt = LoopOptsCount;
 982   set_do_inlining(Inline);
 983   set_max_inline_size(MaxInlineSize);
 984   set_freq_inline_size(FreqInlineSize);
 985   set_do_scheduling(OptoScheduling);
 986 
 987   set_do_vector_loop(false);
 988 
 989   if (AllowVectorizeOnDemand) {
 990     if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
 991       set_do_vector_loop(true);
 992       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
 993     } else if (has_method() && method()->name() != 0 &&
 994                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
 995       set_do_vector_loop(true);
 996     }
 997   }
 998   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
 999   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1000 
1001   set_age_code(has_method() && method()->profile_aging());

1264 bool Compile::allow_range_check_smearing() const {
1265   // If this method has already thrown a range-check,
1266   // assume it was because we already tried range smearing
1267   // and it failed.
1268   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1269   return !already_trapped;
1270 }
1271 
1272 
1273 //------------------------------flatten_alias_type-----------------------------
1274 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1275   int offset = tj->offset();
1276   TypePtr::PTR ptr = tj->ptr();
1277 
1278   // Known instance (scalarizable allocation) alias only with itself.
1279   bool is_known_inst = tj->isa_oopptr() != NULL &&
1280                        tj->is_oopptr()->is_known_instance();
1281 
1282   // Process weird unsafe references.
1283   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1284     assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");

1285     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1286     tj = TypeOopPtr::BOTTOM;
1287     ptr = tj->ptr();
1288     offset = tj->offset();
1289   }
1290 
1291   // Array pointers need some flattening
1292   const TypeAryPtr *ta = tj->isa_aryptr();
1293   if (ta && ta->is_stable()) {
1294     // Erase stability property for alias analysis.
1295     tj = ta = ta->cast_to_stable(false);
1296   }
1297   if( ta && is_known_inst ) {
1298     if ( offset != Type::OffsetBot &&
1299          offset > arrayOopDesc::length_offset_in_bytes() ) {
1300       offset = Type::OffsetBot; // Flatten constant access into array body only
1301       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
1302     }
1303   } else if( ta && _AliasLevel >= 2 ) {
1304     // For arrays indexed by constant indices, we flatten the alias
1305     // space to include all of the array body.  Only the header, klass
1306     // and array length can be accessed un-aliased.


1307     if( offset != Type::OffsetBot ) {
1308       if( ta->const_oop() ) { // MethodData* or Method*
1309         offset = Type::OffsetBot;   // Flatten constant access into array body
1310         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
1311       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1312         // range is OK as-is.
1313         tj = ta = TypeAryPtr::RANGE;
1314       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1315         tj = TypeInstPtr::KLASS; // all klass loads look alike
1316         ta = TypeAryPtr::RANGE; // generic ignored junk
1317         ptr = TypePtr::BotPTR;
1318       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1319         tj = TypeInstPtr::MARK;
1320         ta = TypeAryPtr::RANGE; // generic ignored junk
1321         ptr = TypePtr::BotPTR;
1322       } else {                  // Random constant offset into array body
1323         offset = Type::OffsetBot;   // Flatten constant access into array body
1324         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1325       }
1326     }
1327     // Arrays of fixed size alias with arrays of unknown size.
1328     if (ta->size() != TypeInt::POS) {
1329       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1330       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
1331     }
1332     // Arrays of known objects become arrays of unknown objects.
1333     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1334       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1335       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1336     }
1337     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1338       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1339       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1340     }
1341     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1342     // cannot be distinguished by bytecode alone.
1343     if (ta->elem() == TypeInt::BOOL) {
1344       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1345       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1346       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1347     }
1348     // During the 2nd round of IterGVN, NotNull castings are removed.
1349     // Make sure the Bottom and NotNull variants alias the same.
1350     // Also, make sure exact and non-exact variants alias the same.
1351     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1352       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
1353     }
1354   }
1355 
1356   // Oop pointers need some flattening
1357   const TypeInstPtr *to = tj->isa_instptr();
1358   if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1359     ciInstanceKlass *k = to->klass()->as_instance_klass();
1360     if( ptr == TypePtr::Constant ) {
1361       if (to->klass() != ciEnv::current()->Class_klass() ||
1362           offset < k->layout_helper_size_in_bytes()) {
1363         // No constant oop pointers (such as Strings); they alias with
1364         // unknown strings.
1365         assert(!is_known_inst, "not scalarizable allocation");
1366         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1367       }
1368     } else if( is_known_inst ) {
1369       tj = to; // Keep NotNull and klass_is_exact for instance type
1370     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1371       // During the 2nd round of IterGVN, NotNull castings are removed.
1372       // Make sure the Bottom and NotNull variants alias the same.
1373       // Also, make sure exact and non-exact variants alias the same.
1374       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1375     }
1376     if (to->speculative() != NULL) {
1377       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
1378     }
1379     // Canonicalize the holder of this field
1380     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1381       // First handle header references such as a LoadKlassNode, even if the
1382       // object's klass is unloaded at compile time (4965979).
1383       if (!is_known_inst) { // Do it only for non-instance types
1384         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1385       }
1386     } else if (offset < 0 || offset >= k->layout_helper_size_in_bytes()) {
1387       // Static fields are in the space above the normal instance
1388       // fields in the java.lang.Class instance.
1389       if (to->klass() != ciEnv::current()->Class_klass()) {
1390         to = NULL;
1391         tj = TypeOopPtr::BOTTOM;
1392         offset = tj->offset();
1393       }
1394     } else {
1395       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1396       assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1397       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1398         if( is_known_inst ) {
1399           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1400         } else {
1401           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1402         }
1403       }
1404     }
1405   }
1406 
1407   // Klass pointers to object array klasses need some flattening
1408   const TypeKlassPtr *tk = tj->isa_klassptr();
1409   if( tk ) {
1410     // If we are referencing a field within a Klass, we need
1411     // to assume the worst case of an Object.  Both exact and
1412     // inexact types must flatten to the same alias class so
1413     // use NotNull as the PTR.
1414     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1415 
1416       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1417                                    TypeInstKlassPtr::OBJECT->klass(),
1418                                    offset);
1419     }
1420 
1421     ciKlass* klass = tk->klass();
1422     if( klass->is_obj_array_klass() ) {
1423       ciKlass* k = TypeAryPtr::OOPS->klass();
1424       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1425         k = TypeInstPtr::BOTTOM->klass();
1426       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
1427     }
1428 
1429     // Check for precise loads from the primary supertype array and force them
1430     // to the supertype cache alias index.  Check for generic array loads from
1431     // the primary supertype array and also force them to the supertype cache
1432     // alias index.  Since the same load can reach both, we need to merge
1433     // these 2 disparate memories into the same alias class.  Since the
1434     // primary supertype array is read-only, there's no chance of confusion
1435     // where we bypass an array load and an array store.
1436     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1437     if (offset == Type::OffsetBot ||
1438         (offset >= primary_supers_offset &&
1439          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1440         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1441       offset = in_bytes(Klass::secondary_super_cache_offset());
1442       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
1443     }
1444   }
1445 
1446   // Flatten all Raw pointers together.
1447   if (tj->base() == Type::RawPtr)
1448     tj = TypeRawPtr::BOTTOM;
1449 
1450   if (tj->base() == Type::AnyPtr)
1451     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1452 
1453   // Flatten all to bottom for now
1454   switch( _AliasLevel ) {
1455   case 0:
1456     tj = TypePtr::BOTTOM;
1457     break;
1458   case 1:                       // Flatten to: oop, static, field or array
1459     switch (tj->base()) {
1460     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1461     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1462     case Type::AryPtr:   // do not distinguish arrays at all

1563   intptr_t key = (intptr_t) adr_type;
1564   key ^= key >> logAliasCacheSize;
1565   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1566 }
1567 
1568 
1569 //-----------------------------grow_alias_types--------------------------------
1570 void Compile::grow_alias_types() {
1571   const int old_ats  = _max_alias_types; // how many before?
1572   const int new_ats  = old_ats;          // how many more?
1573   const int grow_ats = old_ats+new_ats;  // how many now?
1574   _max_alias_types = grow_ats;
1575   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1576   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1577   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1578   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1579 }
1580 
1581 
1582 //--------------------------------find_alias_type------------------------------
1583 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1584   if (_AliasLevel == 0)
1585     return alias_type(AliasIdxBot);
1586 
1587   AliasCacheEntry* ace = probe_alias_cache(adr_type);
1588   if (ace->_adr_type == adr_type) {
1589     return alias_type(ace->_index);
1590   }
1591 
1592   // Handle special cases.
1593   if (adr_type == NULL)             return alias_type(AliasIdxTop);
1594   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1595 
1596   // Do it the slow way.
1597   const TypePtr* flat = flatten_alias_type(adr_type);
1598 
1599 #ifdef ASSERT
1600   {
1601     ResourceMark rm;
1602     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1603            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1604     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1605            Type::str(adr_type));
1606     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1607       const TypeOopPtr* foop = flat->is_oopptr();
1608       // Scalarizable allocations have exact klass always.
1609       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1619     if (alias_type(i)->adr_type() == flat) {
1620       idx = i;
1621       break;
1622     }
1623   }
1624 
1625   if (idx == AliasIdxTop) {
1626     if (no_create)  return NULL;
1627     // Grow the array if necessary.
1628     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1629     // Add a new alias type.
1630     idx = _num_alias_types++;
1631     _alias_types[idx]->Init(idx, flat);
1632     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1633     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1634     if (flat->isa_instptr()) {
1635       if (flat->offset() == java_lang_Class::klass_offset()
1636           && flat->is_instptr()->klass() == env()->Class_klass())
1637         alias_type(idx)->set_rewritable(false);
1638     }

1639     if (flat->isa_aryptr()) {
1640 #ifdef ASSERT
1641       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1642       // (T_BYTE has the weakest alignment and size restrictions...)
1643       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1644 #endif

1645       if (flat->offset() == TypePtr::OffsetBot) {
1646         alias_type(idx)->set_element(flat->is_aryptr()->elem());
1647       }
1648     }
1649     if (flat->isa_klassptr()) {
1650       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1651         alias_type(idx)->set_rewritable(false);
1652       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1653         alias_type(idx)->set_rewritable(false);
1654       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1655         alias_type(idx)->set_rewritable(false);
1656       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1657         alias_type(idx)->set_rewritable(false);


1658       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1659         alias_type(idx)->set_rewritable(false);
1660     }
1661     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1662     // but the base pointer type is not distinctive enough to identify
1663     // references into JavaThread.)
1664 
1665     // Check for final fields.
1666     const TypeInstPtr* tinst = flat->isa_instptr();
1667     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1668       ciField* field;
1669       if (tinst->const_oop() != NULL &&
1670           tinst->klass() == ciEnv::current()->Class_klass() &&
1671           tinst->offset() >= (tinst->klass()->as_instance_klass()->layout_helper_size_in_bytes())) {
1672         // static field
1673         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1674         field = k->get_field_by_offset(tinst->offset(), true);
1675       } else {
1676         ciInstanceKlass *k = tinst->klass()->as_instance_klass();
1677         field = k->get_field_by_offset(tinst->offset(), false);
1678       }
1679       assert(field == NULL ||
1680              original_field == NULL ||
1681              (field->holder() == original_field->holder() &&
1682               field->offset() == original_field->offset() &&
1683               field->is_static() == original_field->is_static()), "wrong field?");
1684       // Set field() and is_rewritable() attributes.
1685       if (field != NULL)  alias_type(idx)->set_field(field);
1686     }
1687   }
1688 
1689   // Fill the cache for next time.
1690   ace->_adr_type = adr_type;
1691   ace->_index    = idx;
1692   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");

1693 
1694   // Might as well try to fill the cache for the flattened version, too.
1695   AliasCacheEntry* face = probe_alias_cache(flat);
1696   if (face->_adr_type == NULL) {
1697     face->_adr_type = flat;
1698     face->_index    = idx;
1699     assert(alias_type(flat) == alias_type(idx), "flat type must work too");

1700   }
1701 
1702   return alias_type(idx);
1703 }
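find_alias_type() above follows a probe-then-fill caching scheme: hash the address type into a small direct-mapped cache, fall back to the slow path on a miss, then fill the entry (for both the original and the flattened key) so the next lookup hits. The following is a self-contained sketch of that scheme under simplified assumptions; compute_index_slowly() is a hypothetical stand-in for flatten_alias_type() plus the linear search, and the xor-fold hash mirrors probe_alias_cache().

    #include <cstdint>
    #include <unordered_map>

    static const int kLogCacheSize = 6;                  // 64 direct-mapped slots
    struct CacheEntry { const void* key; int index; };
    static CacheEntry cache[1 << kLogCacheSize];         // zero-initialized; callers pass non-null keys

    static CacheEntry* probe(const void* key) {
      intptr_t k = (intptr_t)key;
      k ^= k >> kLogCacheSize;                           // fold high bits into the slot index
      return &cache[k & ((1 << kLogCacheSize) - 1)];
    }

    // Stand-in for the slow path: hands out a stable small index per key.
    static int compute_index_slowly(const void* key) {
      static std::unordered_map<const void*, int> table;
      auto it = table.find(key);
      if (it != table.end()) return it->second;
      int idx = (int)table.size();
      table[key] = idx;
      return idx;
    }

    static int lookup(const void* key) {
      CacheEntry* e = probe(key);
      if (e->key == key) return e->index;                // cache hit
      int idx = compute_index_slowly(key);               // miss: do the expensive work once
      e->key   = key;                                    // fill the slot for next time
      e->index = idx;
      return idx;
    }
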
1704 
1705 
1706 Compile::AliasType* Compile::alias_type(ciField* field) {
1707   const TypeOopPtr* t;
1708   if (field->is_static())
1709     t = TypeInstPtr::make(field->holder()->java_mirror());
1710   else
1711     t = TypeOopPtr::make_from_klass_raw(field->holder());
1712   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1713   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1714   return atp;
1715 }
1716 
1717 
1718 //------------------------------have_alias_type--------------------------------
1719 bool Compile::have_alias_type(const TypePtr* adr_type) {

1796   C->set_post_loop_opts_phase(); // no more loop opts allowed
1797 
1798   assert(!C->major_progress(), "not cleared");
1799 
1800   if (_for_post_loop_igvn.length() > 0) {
1801     while (_for_post_loop_igvn.length() > 0) {
1802       Node* n = _for_post_loop_igvn.pop();
1803       n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1804       igvn._worklist.push(n);
1805     }
1806     igvn.optimize();
1807     assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1808 
1809     // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1810     if (C->major_progress()) {
1811       C->clear_major_progress(); // ensure that major progress is now clear
1812     }
1813   }
1814 }
1815 
1816 // StringOpts and late inlining of string methods
1817 void Compile::inline_string_calls(bool parse_time) {
1818   {
1819     // remove useless nodes to make the usage analysis simpler
1820     ResourceMark rm;
1821     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1822   }
1823 
1824   {
1825     ResourceMark rm;
1826     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1827     PhaseStringOpts pso(initial_gvn(), for_igvn());
1828     print_method(PHASE_AFTER_STRINGOPTS, 3);
1829   }
1830 
1831   // now inline anything that we skipped the first time around
1832   if (!parse_time) {
1833     _late_inlines_pos = _late_inlines.length();
1834   }
1835 

1985     assert(has_stringbuilder(), "inconsistent");
1986     for_igvn()->clear();
1987     initial_gvn()->replace_with(&igvn);
1988 
1989     inline_string_calls(false);
1990 
1991     if (failing())  return;
1992 
1993     inline_incrementally_cleanup(igvn);
1994   }
1995 
1996   set_inlining_incrementally(false);
1997 }
1998 
1999 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2000   // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2001   // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2002   // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
2003   // as if "inlining_incrementally() == true" were set.
2004   assert(inlining_incrementally() == false, "not allowed");
2005   assert(_modified_nodes == NULL, "not allowed");
2006   assert(_late_inlines.length() > 0, "sanity");
2007 
2008   while (_late_inlines.length() > 0) {
2009     for_igvn()->clear();
2010     initial_gvn()->replace_with(&igvn);
2011 
2012     while (inline_incrementally_one()) {
2013       assert(!failing(), "inconsistent");
2014     }
2015     if (failing())  return;
2016 
2017     inline_incrementally_cleanup(igvn);
2018   }

2019 }
2020 
2021 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2022   if (_loop_opts_cnt > 0) {
2023     debug_only( int cnt = 0; );
2024     while (major_progress() && (_loop_opts_cnt > 0)) {
2025       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2026       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2027       PhaseIdealLoop::optimize(igvn, mode);
2028       _loop_opts_cnt--;
2029       if (failing())  return false;
2030       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2031     }
2032   }
2033   return true;
2034 }
2035 
2036 // Remove edges from "root" to each SafePoint at a backward branch.
2037 // They were inserted during parsing (see add_safepoint()) to make
2038 // infinite loops without calls or exceptions visible to root, i.e.,

2141   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2142     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2143     initial_gvn()->replace_with(&igvn);
2144     for_igvn()->clear();
2145     Unique_Node_List new_worklist(C->comp_arena());
2146     {
2147       ResourceMark rm;
2148       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2149     }
2150     Unique_Node_List* save_for_igvn = for_igvn();
2151     set_for_igvn(&new_worklist);
2152     igvn = PhaseIterGVN(initial_gvn());
2153     igvn.optimize();
2154     set_for_igvn(save_for_igvn);
2155   }
2156 
2157   // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2158   // safepoints
2159   remove_root_to_sfpts_edges(igvn);
2160 
2161   // Perform escape analysis
2162   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2163     if (has_loops()) {
2164       // Cleanup graph (remove dead nodes).
2165       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2166       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2167       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2168       if (failing())  return;
2169     }
2170     ConnectionGraph::do_analysis(this, &igvn);
2171 
2172     if (failing())  return;
2173 
2174     // Optimize out fields loads from scalar replaceable allocations.
2175     igvn.optimize();
2176     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2177 
2178     if (failing())  return;
2179 
2180     if (congraph() != NULL && macro_count() > 0) {

2244   print_method(PHASE_ITER_GVN2, 2);
2245 
2246   if (failing())  return;
2247 
2248   // Loop transforms on the ideal graph.  Range Check Elimination,
2249   // peeling, unrolling, etc.
2250   if (!optimize_loops(igvn, LoopOptsDefault)) {
2251     return;
2252   }
2253 
2254   if (failing())  return;
2255 
2256   C->clear_major_progress(); // ensure that major progress is now clear
2257 
2258   process_for_post_loop_opts_igvn(igvn);
2259 
2260 #ifdef ASSERT
2261   bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2262 #endif
2263 
2264   {
2265     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2266     PhaseMacroExpand  mex(igvn);
2267     if (mex.expand_macro_nodes()) {
2268       assert(failing(), "must bail out w/ explicit message");
2269       return;
2270     }
2271     print_method(PHASE_MACRO_EXPANSION, 2);
2272   }
2273 
2274   {
2275     TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2276     if (bs->expand_barriers(this, igvn)) {
2277       assert(failing(), "must bail out w/ explicit message");
2278       return;
2279     }
2280     print_method(PHASE_BARRIER_EXPANSION, 2);
2281   }
2282 
2283   if (C->max_vector_size() > 0) {
2284     C->optimize_logic_cones(igvn);
2285     igvn.optimize();
2286   }
2287 
2288   DEBUG_ONLY( _modified_nodes = NULL; )
2289 
2290   assert(igvn._worklist.size() == 0, "not empty");
2291 
2292   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2293 
2294   if (_late_inlines.length() > 0) {
2295     // More opportunities to optimize virtual and MH calls.
2296     // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
2297     process_late_inline_calls_no_inline(igvn);
2298   }
2299  } // (End scope of igvn; run destructor if necessary for asserts.)
2300 
2301  check_no_dead_use();
2302 
2303  process_print_inlining();
2304 
2305  // A method with only infinite loops has no edges entering loops from root
2306  {
2307    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2308    if (final_graph_reshaping()) {
2309      assert(failing(), "must bail out w/ explicit message");
2310      return;
2311    }
2312  }
2313 
2314  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2315  DEBUG_ONLY(set_phase_optimize_finished();)
2316 }
2317 
2318 #ifdef ASSERT

2852             // Accumulate any precedence edges
2853             if (mem->in(i) != NULL) {
2854               n->add_prec(mem->in(i));
2855             }
2856           }
2857           // Everything above this point has been processed.
2858           done = true;
2859         }
2860         // Eliminate the previous StoreCM
2861         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2862         assert(mem->outcnt() == 0, "should be dead");
2863         mem->disconnect_inputs(this);
2864       } else {
2865         prev = mem;
2866       }
2867       mem = prev->in(MemNode::Memory);
2868     }
2869   }
2870 }
2871 

2872 //------------------------------final_graph_reshaping_impl----------------------
2873 // Implement items 1-5 from final_graph_reshaping below.
2874 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2875 
2876   if ( n->outcnt() == 0 ) return; // dead node
2877   uint nop = n->Opcode();
2878 
2879   // Check for 2-input instruction with "last use" on right input.
2880   // Swap to left input.  Implements item (2).
2881   if( n->req() == 3 &&          // two-input instruction
2882       n->in(1)->outcnt() > 1 && // left use is NOT a last use
2883       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2884       n->in(2)->outcnt() == 1 &&// right use IS a last use
2885       !n->in(2)->is_Con() ) {   // right use is not a constant
2886     // Check for commutative opcode
2887     switch( nop ) {
2888     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
2889     case Op_MaxI:  case Op_MaxL:  case Op_MaxF:  case Op_MaxD:
2890     case Op_MinI:  case Op_MinL:  case Op_MinF:  case Op_MinD:
2891     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:

3006       if (n->outcnt() > 1 &&
3007           !n->is_Proj() &&
3008           nop != Op_CreateEx &&
3009           nop != Op_CheckCastPP &&
3010           nop != Op_DecodeN &&
3011           nop != Op_DecodeNKlass &&
3012           !n->is_Mem() &&
3013           !n->is_Phi()) {
3014         Node *x = n->clone();
3015         call->set_req(TypeFunc::Parms, x);
3016       }
3017     }
3018     break;
3019   }
3020 
3021   case Op_StoreCM:
3022     {
3023       // Convert OopStore dependence into precedence edge
3024       Node* prec = n->in(MemNode::OopStore);
3025       n->del_req(MemNode::OopStore);
3026       n->add_prec(prec);
3027       eliminate_redundant_card_marks(n);
3028     }
3029 
3030     // fall through
3031 
3032   case Op_StoreB:
3033   case Op_StoreC:
3034   case Op_StorePConditional:
3035   case Op_StoreI:
3036   case Op_StoreL:
3037   case Op_StoreIConditional:
3038   case Op_StoreLConditional:
3039   case Op_CompareAndSwapB:
3040   case Op_CompareAndSwapS:
3041   case Op_CompareAndSwapI:
3042   case Op_CompareAndSwapL:
3043   case Op_CompareAndSwapP:
3044   case Op_CompareAndSwapN:
3045   case Op_WeakCompareAndSwapB:
3046   case Op_WeakCompareAndSwapS:

3578           // Replace all nodes with identical edges as m with m
3579           k->subsume_by(m, this);
3580         }
3581       }
3582     }
3583     break;
3584   }
3585   case Op_CmpUL: {
3586     if (!Matcher::has_match_rule(Op_CmpUL)) {
3587       // No support for unsigned long comparisons
3588       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3589       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3590       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3591       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3592       Node* andl = new AndLNode(orl, remove_sign_mask);
3593       Node* cmp = new CmpLNode(andl, n->in(2));
3594       n->subsume_by(cmp, this);
3595     }
3596     break;
3597   }
3598   default:
3599     assert(!n->is_Call(), "");
3600     assert(!n->is_Mem(), "");
3601     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3602     break;
3603   }
3604 }
3605 
3606 //------------------------------final_graph_reshaping_walk---------------------
3607 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3608 // requires that the walk visits a node's inputs before visiting the node.
3609 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3610   Unique_Node_List sfpt;
3611 
3612   frc._visited.set(root->_idx); // first, mark node as visited
3613   uint cnt = root->req();
3614   Node *n = root;
3615   uint  i = 0;
3616   while (true) {
3617     if (i < cnt) {

3925   }
3926 }
3927 
3928 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
3929   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
3930 }
3931 
3932 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
3933   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
3934 }
3935 
3936 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
3937   if (holder->is_initialized()) {
3938     return false;
3939   }
3940   if (holder->is_being_initialized()) {
3941     if (accessing_method->holder() == holder) {
3942       // Access inside a class. The barrier can be elided when access happens in <clinit>,
 3943       // <init>, or a static method. In all those cases, an initialization
 3944       // barrier on the holder klass has already been passed.
3945       if (accessing_method->is_static_initializer() ||
3946           accessing_method->is_object_initializer() ||
3947           accessing_method->is_static()) {
3948         return false;
3949       }
3950     } else if (accessing_method->holder()->is_subclass_of(holder)) {
3951       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
 3952       // In case of <init> or a static method, a barrier on the subclass is not enough:
 3953       // the child class can become fully initialized while its parent class is still being initialized.
3954       if (accessing_method->is_static_initializer()) {
3955         return false;
3956       }
3957     }
3958     ciMethod* root = method(); // the root method of compilation
3959     if (root != accessing_method) {
3960       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
3961     }
3962   }
3963   return true;
3964 }
3965 
3966 #ifndef PRODUCT
3967 //------------------------------verify_graph_edges---------------------------
3968 // Walk the Graph and verify that there is a one-to-one correspondence
3969 // between Use-Def edges and Def-Use edges in the graph.
3970 void Compile::verify_graph_edges(bool no_dead_code) {
3971   if (VerifyGraphEdges) {
3972     Unique_Node_List visited;
3973     // Call recursive graph walk to check edges
3974     _root->verify_edges(visited);

4055                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
4056   }
4057 
4058   if (VerifyIdealNodeCount) {
4059     Compile::current()->print_missing_nodes();
4060   }
4061 #endif
4062 
4063   if (_log != NULL) {
4064     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4065   }
4066 }
4067 
4068 //----------------------------static_subtype_check-----------------------------
4069 // Shortcut important common cases when superklass is exact:
4070 // (0) superklass is java.lang.Object (can occur in reflective code)
4071 // (1) subklass is already limited to a subtype of superklass => always ok
4072 // (2) subklass does not overlap with superklass => always fail
4073 // (3) superklass has NO subtypes and we can check with a simple compare.
4074 int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
4075   if (StressReflectiveCode) {
4076     return SSC_full_test;       // Let caller generate the general case.
4077   }
4078 
4079   if (superk == env()->Object_klass()) {
4080     return SSC_always_true;     // (0) this test cannot fail
4081   }
4082 
4083   ciType* superelem = superk;
4084   ciType* subelem = subk;
4085   if (superelem->is_array_klass()) {
4086     superelem = superelem->as_array_klass()->base_element_type();
4087   }
4088   if (subelem->is_array_klass()) {
4089     subelem = subelem->as_array_klass()->base_element_type();
4090   }
4091 
4092   if (!subk->is_interface()) {  // cannot trust static interface types yet
4093     if (subk->is_subtype_of(superk)) {
4094       return SSC_always_true;   // (1) false path dead; no dynamic test needed
4095     }
4096     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
4097         !(subelem->is_klass() && subelem->as_klass()->is_interface()) &&
4098         !superk->is_subtype_of(subk)) {
4099       return SSC_always_false;  // (2) true path dead; no dynamic test needed
4100     }
4101   }
4102 
4103   // If casting to an instance klass, it must have no subtypes
4104   if (superk->is_interface()) {
4105     // Cannot trust interfaces yet.
4106     // %%% S.B. superk->nof_implementors() == 1
4107   } else if (superelem->is_instance_klass()) {
4108     ciInstanceKlass* ik = superelem->as_instance_klass();
4109     if (!ik->has_subklass() && !ik->is_interface()) {
4110       if (!ik->is_final()) {
4111         // Add a dependency if there is a chance of a later subclass.
4112         dependencies()->assert_leaf_type(ik);
4113       }
4114       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
4115     }
4116   } else {
4117     // A primitive array type has no subtypes.
4118     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
4119   }
4120 
4121   return SSC_full_test;
4122 }

4614       const Type* t = igvn.type_or_null(n);
4615       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4616       if (n->is_Type()) {
4617         t = n->as_Type()->type();
4618         assert(t == t->remove_speculative(), "no more speculative types");
4619       }
4620       // Iterate over outs - endless loops is unreachable from below
4621       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4622         Node *m = n->fast_out(i);
4623         if (not_a_node(m)) {
4624           continue;
4625         }
4626         worklist.push(m);
4627       }
4628     }
4629     igvn.check_no_speculative_types();
4630 #endif
4631   }
4632 }
4633 
4634 // Auxiliary methods to support randomized stressing/fuzzing.
4635 
4636 int Compile::random() {
4637   _stress_seed = os::next_random(_stress_seed);
4638   return static_cast<int>(_stress_seed);
4639 }
4640 
4641 // This method can be called an arbitrary number of times, with the current count
4642 // as the argument. The logic allows selecting a single candidate from the
4643 // running list of candidates as follows:
4644 //    int count = 0;
4645 //    Cand* selected = null;
4646 //    while(cand = cand->next()) {
4647 //      if (randomized_select(++count)) {
4648 //        selected = cand;
4649 //      }
4650 //    }
4651 //
4652 // Including count equalizes the chances any candidate is "selected".
4653 // This is useful when we don't have the complete list of candidates to choose
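The scheme sketched in this comment is single-element reservoir sampling: accepting the k-th candidate with probability 1/k leaves each of the N candidates selected with probability 1/N, without knowing N up front. A standalone illustration follows; it uses plain std::rand() and a hypothetical select_with_prob_one_over() rather than Compile::random() and the actual randomized_select(), whose body is not shown here.

    #include <cstdlib>
    #include <vector>

    // Accept the count-th candidate with probability 1/count.
    static bool select_with_prob_one_over(int count) {
      return std::rand() % count == 0;
    }

    // Scan once, keeping each candidate with the probability that makes the
    // final choice uniform over everything seen so far.
    static int pick_uniform(const std::vector<int>& candidates) {
      int selected = -1;
      int count = 0;
      for (int cand : candidates) {
        if (select_with_prob_one_over(++count)) {
          selected = cand;
        }
      }
      return selected;   // -1 only when the list was empty
    }
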

src/hotspot/share/opto/compile.cpp (new version)

  35 #include "compiler/disassembler.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/c2/barrierSetC2.hpp"
  39 #include "jfr/jfrEvents.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "opto/addnode.hpp"
  42 #include "opto/block.hpp"
  43 #include "opto/c2compiler.hpp"
  44 #include "opto/callGenerator.hpp"
  45 #include "opto/callnode.hpp"
  46 #include "opto/castnode.hpp"
  47 #include "opto/cfgnode.hpp"
  48 #include "opto/chaitin.hpp"
  49 #include "opto/compile.hpp"
  50 #include "opto/connode.hpp"
  51 #include "opto/convertnode.hpp"
  52 #include "opto/divnode.hpp"
  53 #include "opto/escape.hpp"
  54 #include "opto/idealGraphPrinter.hpp"
  55 #include "opto/inlinetypenode.hpp"
  56 #include "opto/loopnode.hpp"
  57 #include "opto/machnode.hpp"
  58 #include "opto/macro.hpp"
  59 #include "opto/matcher.hpp"
  60 #include "opto/mathexactnode.hpp"
  61 #include "opto/memnode.hpp"
  62 #include "opto/mulnode.hpp"
  63 #include "opto/narrowptrnode.hpp"
  64 #include "opto/node.hpp"
  65 #include "opto/opcodes.hpp"
  66 #include "opto/output.hpp"
  67 #include "opto/parse.hpp"
  68 #include "opto/phaseX.hpp"
  69 #include "opto/rootnode.hpp"
  70 #include "opto/runtime.hpp"
  71 #include "opto/stringopts.hpp"
  72 #include "opto/type.hpp"
  73 #include "opto/vector.hpp"
  74 #include "opto/vectornode.hpp"
  75 #include "runtime/globals_extension.hpp"

 373   // Constant node that has no out-edges and has only one in-edge from
 374   // root is usually dead. However, sometimes reshaping walk makes
 375   // it reachable by adding use edges. So, we will NOT count Con nodes
 376   // as dead to be conservative about the dead node count at any
 377   // given time.
 378   if (!dead->is_Con()) {
 379     record_dead_node(dead->_idx);
 380   }
 381   if (dead->is_macro()) {
 382     remove_macro_node(dead);
 383   }
 384   if (dead->is_expensive()) {
 385     remove_expensive_node(dead);
 386   }
 387   if (dead->Opcode() == Op_Opaque4) {
 388     remove_skeleton_predicate_opaq(dead);
 389   }
 390   if (dead->for_post_loop_opts_igvn()) {
 391     remove_from_post_loop_opts_igvn(dead);
 392   }
 393   if (dead->is_InlineTypeBase()) {
 394     remove_inline_type(dead);
 395   }
 396   if (dead->is_Call()) {
 397     remove_useless_late_inlines(                &_late_inlines, dead);
 398     remove_useless_late_inlines(         &_string_late_inlines, dead);
 399     remove_useless_late_inlines(         &_boxing_late_inlines, dead);
 400     remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
 401   }
 402   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 403   bs->unregister_potential_barrier_node(dead);
 404 }
 405 
 406 // Disconnect all useless nodes by disconnecting those at the boundary.
 407 void Compile::disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_List* worklist) {
 408   uint next = 0;
 409   while (next < useful.size()) {
 410     Node *n = useful.at(next++);
 411     if (n->is_SafePoint()) {
 412       // We're done with a parsing phase. Replaced nodes are not valid
 413       // beyond that point.
 414       n->as_SafePoint()->delete_replaced_nodes();
 415     }
 416     // Use raw traversal of out edges since this code removes out edges
 417     int max = n->outcnt();
 418     for (int j = 0; j < max; ++j) {
 419       Node* child = n->raw_out(j);
 420       if (!useful.member(child)) {
 421         assert(!child->is_top() || child != top(),
 422                "If top is cached in Compile object it is in useful list");
 423         // Only need to remove this out-edge to the useless node
 424         n->raw_del_out(j);
 425         --j;
 426         --max;
 427       }
 428     }
 429     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 430       worklist->push(n->unique_out());
 431     }
 432     if (n->outcnt() == 0) {
 433       worklist->push(n);
 434     }
 435   }
 436 
 437   remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
 438   remove_useless_nodes(_predicate_opaqs,    useful); // remove useless predicate opaque nodes
 439   remove_useless_nodes(_skeleton_predicate_opaqs, useful);
 440   remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
 441   remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
 442   remove_useless_nodes(_inline_type_nodes,  useful); // remove useless inline type nodes
 443 #ifdef ASSERT
 444   if (_modified_nodes != NULL) {
 445     _modified_nodes->remove_useless_nodes(useful.member_set());
 446   }
 447 #endif
 448   remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
 449 
 450   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 451   bs->eliminate_useless_gc_barriers(useful, this);
 452   // clean up the late inline lists
 453   remove_useless_late_inlines(                &_late_inlines, useful);
 454   remove_useless_late_inlines(         &_string_late_inlines, useful);
 455   remove_useless_late_inlines(         &_boxing_late_inlines, useful);
 456   remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
 457   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 458 }
 459 
 460 // ============================================================================
 461 //------------------------------CompileWrapper---------------------------------
 462 class CompileWrapper : public StackObj {
 463   Compile *const _compile;
 464  public:
 465   CompileWrapper(Compile* compile);
 466 
 467   ~CompileWrapper();

 574 #ifndef PRODUCT
 575                   _igv_idx(0),
 576                   _trace_opto_output(directive->TraceOptoOutputOption),
 577                   _print_ideal(directive->PrintIdealOption),
 578 #endif
 579                   _has_method_handle_invokes(false),
 580                   _clinit_barrier_on_entry(false),
 581                   _stress_seed(0),
 582                   _comp_arena(mtCompiler),
 583                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 584                   _env(ci_env),
 585                   _directive(directive),
 586                   _log(ci_env->log()),
 587                   _failure_reason(NULL),
 588                   _intrinsics        (comp_arena(), 0, 0, NULL),
 589                   _macro_nodes       (comp_arena(), 8, 0, NULL),
 590                   _predicate_opaqs   (comp_arena(), 8, 0, NULL),
 591                   _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
 592                   _expensive_nodes   (comp_arena(), 8, 0, NULL),
 593                   _for_post_loop_igvn(comp_arena(), 8, 0, NULL),
 594                   _inline_type_nodes (comp_arena(), 8, 0, NULL),
 595                   _coarsened_locks   (comp_arena(), 8, 0, NULL),
 596                   _congraph(NULL),
 597                   NOT_PRODUCT(_printer(NULL) COMMA)
 598                   _dead_node_list(comp_arena()),
 599                   _dead_node_count(0),
 600                   _node_arena(mtCompiler),
 601                   _old_arena(mtCompiler),
 602                   _mach_constant_base_node(NULL),
 603                   _Compile_types(mtCompiler),
 604                   _initial_gvn(NULL),
 605                   _for_igvn(NULL),
 606                   _late_inlines(comp_arena(), 2, 0, NULL),
 607                   _string_late_inlines(comp_arena(), 2, 0, NULL),
 608                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
 609                   _vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
 610                   _late_inlines_pos(0),
 611                   _number_of_mh_late_inlines(0),
 612                   _native_invokers(comp_arena(), 1, 0, NULL),
 613                   _print_inlining_stream(NULL),
 614                   _print_inlining_list(NULL),

 679   // Node list that Iterative GVN will start with
 680   Unique_Node_List for_igvn(comp_arena());
 681   set_for_igvn(&for_igvn);
 682 
 683   // GVN that will be run immediately on new nodes
 684   uint estimated_size = method()->code_size()*4+64;
 685   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 686   PhaseGVN gvn(node_arena(), estimated_size);
 687   set_initial_gvn(&gvn);
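       // Sizing sketch for the hash table above (simple arithmetic, nothing new):
       // a method with 100 bytes of bytecode starts with 100*4+64 = 464 entries,
       // while very small methods are clamped up to MINIMUM_NODE_HASH.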
 688 
 689   print_inlining_init();
 690   { // Scope for timing the parser
 691     TracePhase tp("parse", &timers[_t_parser]);
 692 
 693     // Put top into the hash table ASAP.
 694     initial_gvn()->transform_no_reclaim(top());
 695 
 696     // Set up tf(), start(), and find a CallGenerator.
 697     CallGenerator* cg = NULL;
 698     if (is_osr_compilation()) {
 699       init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
 700       StartNode* s = new StartOSRNode(root(), tf()->domain_sig());


 701       initial_gvn()->set_type_bottom(s);
 702       init_start(s);
 703       cg = CallGenerator::for_osr(method(), entry_bci());
 704     } else {
 705       // Normal case.
 706       init_tf(TypeFunc::make(method()));
 707       StartNode* s = new StartNode(root(), tf()->domain_cc());
 708       initial_gvn()->set_type_bottom(s);
 709       init_start(s);
 710       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 711         // With java.lang.ref.Reference.get() we must go through the
 712         // intrinsic - even when get() is the root
 713         // method of the compile - so that, if necessary, the value in
 714         // the referent field of the reference object gets recorded by
 715         // the pre-barrier code.
 716         cg = find_intrinsic(method(), false);
 717       }
 718       if (cg == NULL) {
 719         float past_uses = method()->interpreter_invocation_count();
 720         float expected_uses = past_uses;
 721         cg = CallGenerator::for_inline(method(), expected_uses);
 722       }
 723     }
 724     if (failing())  return;
 725     if (cg == NULL) {
 726       record_method_not_compilable("cannot parse method");
 727       return;

 818     }
 819   }
 820 #endif
 821 
 822 #ifdef ASSERT
 823   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 824   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 825 #endif
 826 
 827   // Dump compilation data to replay it.
 828   if (directive->DumpReplayOption) {
 829     env()->dump_replay_data(_compile_id);
 830   }
 831   if (directive->DumpInlineOption && (ilt() != NULL)) {
 832     env()->dump_inline_data(_compile_id);
 833   }
 834 
 835   // Now that we know the size of all the monitors we can add a fixed slot
 836   // for the original deopt pc.
 837   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
 838   if (needs_stack_repair()) {
 839     // One extra slot for the special stack increment value
 840     next_slot += 2;
 841   }
 842   set_fixed_slots(next_slot);
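       // Slot arithmetic sketch (assuming a 64-bit VM where VMRegImpl::stack_slot_size
       // is 4 bytes): the saved deopt pc costs sizeof(address)/4 = 2 slots, and methods
       // that need stack repair reserve two more 32-bit slots (next_slot += 2) for the
       // stack increment value.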
 843 
 844   // Compute when to use implicit null checks. Used by matching trap-based
 845   // nodes and NullCheck optimization.
 846   set_allowed_deopt_reasons();
 847 
 848   // Now generate code
 849   Code_Gen();
 850 }
 851 
 852 //------------------------------Compile----------------------------------------
 853 // Compile a runtime stub
 854 Compile::Compile( ciEnv* ci_env,
 855                   TypeFunc_generator generator,
 856                   address stub_function,
 857                   const char *stub_name,
 858                   int is_fancy_jump,
 859                   bool pass_tls,
 860                   bool return_pc,
 861                   DirectiveSet* directive)

 978   // Create Debug Information Recorder to record scopes, oopmaps, etc.
 979   env()->set_oop_recorder(new OopRecorder(env()->arena()));
 980   env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
 981   env()->set_dependencies(new Dependencies(env()));
 982 
 983   _fixed_slots = 0;
 984   set_has_split_ifs(false);
 985   set_has_loops(false); // first approximation
 986   set_has_stringbuilder(false);
 987   set_has_boxed_value(false);
 988   _trap_can_recompile = false;  // no traps emitted yet
 989   _major_progress = true; // start out assuming good things will happen
 990   set_has_unsafe_access(false);
 991   set_max_vector_size(0);
 992   set_clear_upper_avx(false);  // false by default: do not clear the upper bits of ymm registers
 993   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
 994   set_decompile_count(0);
 995 
 996   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
 997   _loop_opts_cnt = LoopOptsCount;
 998   _has_flattened_accesses = false;
 999   _flattened_accesses_share_alias = true;
1000   _scalarize_in_safepoints = false;
1001 
1002   set_do_inlining(Inline);
1003   set_max_inline_size(MaxInlineSize);
1004   set_freq_inline_size(FreqInlineSize);
1005   set_do_scheduling(OptoScheduling);
1006 
1007   set_do_vector_loop(false);
1008 
1009   if (AllowVectorizeOnDemand) {
1010     if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
1011       set_do_vector_loop(true);
1012       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1013     } else if (has_method() && method()->name() != 0 &&
1014                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1015       set_do_vector_loop(true);
1016     }
1017   }
1018   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1019   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1020 
1021   set_age_code(has_method() && method()->profile_aging());

1284 bool Compile::allow_range_check_smearing() const {
1285   // If this method has already thrown a range-check,
1286   // assume it was because we already tried range smearing
1287   // and it failed.
1288   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1289   return !already_trapped;
1290 }
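     // Usage sketch (illustrative; the actual callers live in the range-check
     // handling elsewhere in C2): smearing widens one dominating check to cover
     // several accesses, so it is only attempted while this predicate holds, e.g.
     //
     //   if (C->allow_range_check_smearing()) {
     //     // try to widen the dominating range check
     //   }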
1291 
1292 
1293 //------------------------------flatten_alias_type-----------------------------
1294 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1295   int offset = tj->offset();
1296   TypePtr::PTR ptr = tj->ptr();
1297 
1298   // A known instance (scalarizable allocation) aliases only with itself.
1299   bool is_known_inst = tj->isa_oopptr() != NULL &&
1300                        tj->is_oopptr()->is_known_instance();
1301 
1302   // Process weird unsafe references.
1303   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1304     bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass();
1305     assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops");
1306     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1307     tj = TypeOopPtr::BOTTOM;
1308     ptr = tj->ptr();
1309     offset = tj->offset();
1310   }
1311 
1312   // Array pointers need some flattening
1313   const TypeAryPtr *ta = tj->isa_aryptr();
1314   if (ta && ta->is_stable()) {
1315     // Erase the stability property for alias analysis.
1316     tj = ta = ta->cast_to_stable(false);
1317   }
1318   if (ta && ta->is_not_flat()) {
1319     // Erase the not-flat property for alias analysis.
1320     tj = ta = ta->cast_to_not_flat(false);
1321   }
1322   if (ta && ta->is_not_null_free()) {
1323     // Erase the not-null-free property for alias analysis.
1324     tj = ta = ta->cast_to_not_null_free(false);
1325   }
1326 
1327   if( ta && is_known_inst ) {
1328     if ( offset != Type::OffsetBot &&
1329          offset > arrayOopDesc::length_offset_in_bytes() ) {
1330       offset = Type::OffsetBot; // Flatten constant access into array body only
1331       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id());
1332     }
1333   } else if( ta && _AliasLevel >= 2 ) {
1334     // For arrays indexed by constant indices, we flatten the alias
1335     // space to include all of the array body.  Only the header, klass
1336     // and array length can be accessed un-aliased.
1337     // For flattened inline type array, each field has its own slice so
1338     // we must include the field offset.
1339     if( offset != Type::OffsetBot ) {
1340       if( ta->const_oop() ) { // MethodData* or Method*
1341         offset = Type::OffsetBot;   // Flatten constant access into array body
1342         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1343       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1344         // range is OK as-is.
1345         tj = ta = TypeAryPtr::RANGE;
1346       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1347         tj = TypeInstPtr::KLASS; // all klass loads look alike
1348         ta = TypeAryPtr::RANGE; // generic ignored junk
1349         ptr = TypePtr::BotPTR;
1350       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1351         tj = TypeInstPtr::MARK;
1352         ta = TypeAryPtr::RANGE; // generic ignored junk
1353         ptr = TypePtr::BotPTR;
1354       } else {                  // Random constant offset into array body
1355         offset = Type::OffsetBot;   // Flatten constant access into array body
1356         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1357       }
1358     }
1359     // Arrays of fixed size alias with arrays of unknown size.
1360     if (ta->size() != TypeInt::POS) {
1361       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1362       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
1363     }
1364     // Arrays of known objects become arrays of unknown objects.
1365     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1366       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1367       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1368     }
1369     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1370       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1371       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1372     }
1373     // Initially all flattened array accesses share a single slice
1374     if (ta->is_flat() && ta->elem() != TypeInlineType::BOTTOM && _flattened_accesses_share_alias) {
1375       const TypeAry *tary = TypeAry::make(TypeInlineType::BOTTOM, ta->size());
1376       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
1377     }
1378     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1379     // cannot be distinguished by bytecode alone.
1380     if (ta->elem() == TypeInt::BOOL) {
1381       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1382       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1383       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
1384     }
1385     // During the 2nd round of IterGVN, NotNull castings are removed.
1386     // Make sure the Bottom and NotNull variants alias the same.
1387     // Also, make sure exact and non-exact variants alias the same.
1388     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1389       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1390     }
1391   }
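       // Worked example for the array flattening above (a sketch; exact type
       // notation simplified): an access typed as an exact byte[10] at a constant
       // element offset flattens to a BotPTR byte[] of unknown size with OffsetBot,
       // so every element access of that array type lands in one alias class, while
       // the length (RANGE), mark and klass header offsets keep their own classes.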
1392 
1393   // Oop pointers need some flattening
1394   const TypeInstPtr *to = tj->isa_instptr();
1395   if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1396     ciInstanceKlass *k = to->klass()->as_instance_klass();
1397     if( ptr == TypePtr::Constant ) {
1398       if (to->klass() != ciEnv::current()->Class_klass() ||
1399           offset < k->layout_helper_size_in_bytes()) {
1400         // No constant oop pointers (such as Strings); they alias with
1401         // unknown strings.
1402         assert(!is_known_inst, "not scalarizable allocation");
1403         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
1404       }
1405     } else if( is_known_inst ) {
1406       tj = to; // Keep NotNull and klass_is_exact for instance type
1407     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1408       // During the 2nd round of IterGVN, NotNull castings are removed.
1409       // Make sure the Bottom and NotNull variants alias the same.
1410       // Also, make sure exact and non-exact variants alias the same.
1411       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
1412     }
1413     if (to->speculative() != NULL) {
1414       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->klass()->flatten_array(), to->instance_id());
1415     }
1416     // Canonicalize the holder of this field
1417     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1418       // First handle header references such as a LoadKlassNode, even if the
1419       // object's klass is unloaded at compile time (4965979).
1420       if (!is_known_inst) { // Do it only for non-instance types
1421         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
1422       }
1423     } else if (offset < 0 || offset >= k->layout_helper_size_in_bytes()) {
1424       // Static fields are in the space above the normal instance
1425       // fields in the java.lang.Class instance.
1426       if (to->klass() != ciEnv::current()->Class_klass()) {
1427         to = NULL;
1428         tj = TypeOopPtr::BOTTOM;
1429         offset = tj->offset();
1430       }
1431     } else {
1432       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1433       assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1434       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1435         if( is_known_inst ) {
1436           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), canonical_holder->flatten_array(), to->instance_id());
1437         } else {
1438           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
1439         }
1440       }
1441     }
1442   }
1443 
1444   // Klass pointers to object array klasses need some flattening
1445   const TypeKlassPtr *tk = tj->isa_klassptr();
1446   if( tk ) {
1447     // If we are referencing a field within a Klass, we need
1448     // to assume the worst case of an Object.  Both exact and
1449     // inexact types must flatten to the same alias class so
1450     // use NotNull as the PTR.
1451     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1452 
1453       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1454                                    TypeInstKlassPtr::OBJECT->klass(),
1455                                    Type::Offset(offset));
1456     }
1457 
1458     ciKlass* klass = tk->klass();
1459     if (klass != NULL && klass->is_obj_array_klass()) {
1460       ciKlass* k = TypeAryPtr::OOPS->klass();
1461       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1462         k = TypeInstPtr::BOTTOM->klass();
1463       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset));
1464     }
1465 
1466     // Check for precise loads from the primary supertype array and force them
1467     // to the supertype cache alias index.  Check for generic array loads from
1468     // the primary supertype array and also force them to the supertype cache
1469     // alias index.  Since the same load can reach both, we need to merge
1470     // these 2 disparate memories into the same alias class.  Since the
1471     // primary supertype array is read-only, there's no chance of confusion
1472     // where we bypass an array load and an array store.
1473     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1474     if (offset == Type::OffsetBot ||
1475         (offset >= primary_supers_offset &&
1476          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1477         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1478       offset = in_bytes(Klass::secondary_super_cache_offset());
1479       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset));
1480     }
1481   }
1482 
1483   // Flatten all Raw pointers together.
1484   if (tj->base() == Type::RawPtr)
1485     tj = TypeRawPtr::BOTTOM;
1486 
1487   if (tj->base() == Type::AnyPtr)
1488     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1489 
1490   // Flatten all to bottom for now
1491   switch( _AliasLevel ) {
1492   case 0:
1493     tj = TypePtr::BOTTOM;
1494     break;
1495   case 1:                       // Flatten to: oop, static, field or array
1496     switch (tj->base()) {
1497     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1498     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1499     case Type::AryPtr:   // do not distinguish arrays at all

1600   intptr_t key = (intptr_t) adr_type;
1601   key ^= key >> logAliasCacheSize;
1602   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1603 }
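     // Hash sketch for probe_alias_cache() above: the raw pointer value is folded
     // onto itself (key ^= key >> logAliasCacheSize) so that high-order pointer bits
     // perturb the low-order bits, which are then masked to index a direct-mapped
     // cache of AliasCacheSize entries.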
1604 
1605 
1606 //-----------------------------grow_alias_types--------------------------------
1607 void Compile::grow_alias_types() {
1608   const int old_ats  = _max_alias_types; // how many before?
1609   const int new_ats  = old_ats;          // how many more?
1610   const int grow_ats = old_ats+new_ats;  // how many now?
1611   _max_alias_types = grow_ats;
1612   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1613   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1614   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1615   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1616 }
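     // Growth note for grow_alias_types() above: new_ats equals old_ats, so each
     // call doubles _max_alias_types (e.g. 64 -> 128), with the freshly allocated
     // AliasType slots zeroed and linked into the reallocated _alias_types array.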
1617 
1618 
1619 //--------------------------------find_alias_type------------------------------
1620 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1621   if (_AliasLevel == 0)
1622     return alias_type(AliasIdxBot);
1623 
1624   AliasCacheEntry* ace = NULL;
1625   if (!uncached) {
1626     ace = probe_alias_cache(adr_type);
1627     if (ace->_adr_type == adr_type) {
1628       return alias_type(ace->_index);
1629     }
1630   }
1631 
1632   // Handle special cases.
1633   if (adr_type == NULL)             return alias_type(AliasIdxTop);
1634   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1635 
1636   // Do it the slow way.
1637   const TypePtr* flat = flatten_alias_type(adr_type);
1638 
1639 #ifdef ASSERT
1640   {
1641     ResourceMark rm;
1642     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1643            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1644     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1645            Type::str(adr_type));
1646     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1647       const TypeOopPtr* foop = flat->is_oopptr();
1648       // Scalarizable allocations have exact klass always.
1649       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1659     if (alias_type(i)->adr_type() == flat) {
1660       idx = i;
1661       break;
1662     }
1663   }
1664 
1665   if (idx == AliasIdxTop) {
1666     if (no_create)  return NULL;
1667     // Grow the array if necessary.
1668     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1669     // Add a new alias type.
1670     idx = _num_alias_types++;
1671     _alias_types[idx]->Init(idx, flat);
1672     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1673     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1674     if (flat->isa_instptr()) {
1675       if (flat->offset() == java_lang_Class::klass_offset()
1676           && flat->is_instptr()->klass() == env()->Class_klass())
1677         alias_type(idx)->set_rewritable(false);
1678     }
1679     ciField* field = NULL;
1680     if (flat->isa_aryptr()) {
1681 #ifdef ASSERT
1682       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1683       // (T_BYTE has the weakest alignment and size restrictions...)
1684       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1685 #endif
1686       const Type* elemtype = flat->is_aryptr()->elem();
1687       if (flat->offset() == TypePtr::OffsetBot) {
1688         alias_type(idx)->set_element(elemtype);
1689       }
1690       int field_offset = flat->is_aryptr()->field_offset().get();
1691       if (elemtype->isa_inlinetype() &&
1692           field_offset != Type::OffsetBot) {
1693         ciInlineKlass* vk = elemtype->inline_klass();
1694         field_offset += vk->first_field_offset();
1695         field = vk->get_field_by_offset(field_offset, false);
1696       }
1697     }
1698     if (flat->isa_klassptr()) {
1699       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1700         alias_type(idx)->set_rewritable(false);
1701       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1702         alias_type(idx)->set_rewritable(false);
1703       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1704         alias_type(idx)->set_rewritable(false);
1705       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1706         alias_type(idx)->set_rewritable(false);
1707       if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1708         alias_type(idx)->set_rewritable(false);
1709       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1710         alias_type(idx)->set_rewritable(false);
1711     }
1712     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1713     // but the base pointer type is not distinctive enough to identify
1714     // references into JavaThread.)
1715 
1716     // Check for final fields.
1717     const TypeInstPtr* tinst = flat->isa_instptr();
1718     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {

1719       if (tinst->const_oop() != NULL &&
1720           tinst->klass() == ciEnv::current()->Class_klass() &&
1721           tinst->offset() >= (tinst->klass()->as_instance_klass()->layout_helper_size_in_bytes())) {
1722         // static field
1723         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1724         field = k->get_field_by_offset(tinst->offset(), true);
1725       } else if (tinst->klass()->is_inlinetype()) {
1726         // Inline type field
1727         ciInlineKlass* vk = tinst->inline_klass();
1728         field = vk->get_field_by_offset(tinst->offset(), false);
1729       } else {
1730         ciInstanceKlass* k = tinst->klass()->as_instance_klass();
1731         field = k->get_field_by_offset(tinst->offset(), false);
1732       }
1733     }
1734     assert(field == NULL ||
1735            original_field == NULL ||
1736            (field->holder() == original_field->holder() &&
1737             field->offset() == original_field->offset() &&
1738             field->is_static() == original_field->is_static()), "wrong field?");
1739     // Set field() and is_rewritable() attributes.
1740     if (field != NULL) {
1741       alias_type(idx)->set_field(field);
1742       if (flat->isa_aryptr()) {
1743         // Fields of flat arrays are rewritable although they are declared final
1744         assert(flat->is_aryptr()->is_flat(), "must be a flat array");
1745         alias_type(idx)->set_rewritable(true);
1746       }
1747     }
1748   }
1749 
1750   // Fill the cache for next time.
1751   if (!uncached) {
1752     ace->_adr_type = adr_type;
1753     ace->_index    = idx;
1754     assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
1755 
1756     // Might as well try to fill the cache for the flattened version, too.
1757     AliasCacheEntry* face = probe_alias_cache(flat);
1758     if (face->_adr_type == NULL) {
1759       face->_adr_type = flat;
1760       face->_index    = idx;
1761       assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1762     }
1763   }
1764 
1765   return alias_type(idx);
1766 }
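     // Usage sketch (illustrative): most clients reach the lookup above through the
     // cached wrappers rather than calling find_alias_type() directly, e.g.
     //
     //   int idx = C->get_alias_index(mem->adr_type());   // memory slice of a node
     //   const TypePtr* slice = C->get_adr_type(idx);
     //
     // where 'mem' stands for any memory node; get_alias_index() funnels into
     // find_alias_type(), and get_adr_type() maps the index back to its flattened type.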
1767 
1768 
1769 Compile::AliasType* Compile::alias_type(ciField* field) {
1770   const TypeOopPtr* t;
1771   if (field->is_static())
1772     t = TypeInstPtr::make(field->holder()->java_mirror());
1773   else
1774     t = TypeOopPtr::make_from_klass_raw(field->holder());
1775   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1776   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1777   return atp;
1778 }
1779 
1780 
1781 //------------------------------have_alias_type--------------------------------
1782 bool Compile::have_alias_type(const TypePtr* adr_type) {

1859   C->set_post_loop_opts_phase(); // no more loop opts allowed
1860 
1861   assert(!C->major_progress(), "not cleared");
1862 
1863   if (_for_post_loop_igvn.length() > 0) {
1864     while (_for_post_loop_igvn.length() > 0) {
1865       Node* n = _for_post_loop_igvn.pop();
1866       n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1867       igvn._worklist.push(n);
1868     }
1869     igvn.optimize();
1870     assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1871 
1872     // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1873     if (C->major_progress()) {
1874       C->clear_major_progress(); // ensure that major progress is now clear
1875     }
1876   }
1877 }
1878 
1879 void Compile::add_inline_type(Node* n) {
1880   assert(n->is_InlineTypeBase(), "unexpected node");
1881   _inline_type_nodes.push(n);
1882 }
1883 
1884 void Compile::remove_inline_type(Node* n) {
1885   assert(n->is_InlineTypeBase(), "unexpected node");
1886   if (_inline_type_nodes.contains(n)) {
1887     _inline_type_nodes.remove(n);
1888   }
1889 }
1890 
1891 // Does the return value keep otherwise useless inline type allocations alive?
1892 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1893   ResourceMark rm;
1894   Unique_Node_List wq;
1895   wq.push(ret_val);
1896   bool some_allocations = false;
1897   for (uint i = 0; i < wq.size(); i++) {
1898     Node* n = wq.at(i);
1899     assert(!n->is_InlineType(), "chain of inline type nodes");
1900     if (n->outcnt() > 1) {
1901       // Some other use for the allocation
1902       return false;
1903     } else if (n->is_InlineTypePtr()) {
1904       wq.push(n->in(1));
1905     } else if (n->is_Phi()) {
1906       for (uint j = 1; j < n->req(); j++) {
1907         wq.push(n->in(j));
1908       }
1909     } else if (n->is_CheckCastPP() &&
1910                n->in(1)->is_Proj() &&
1911                n->in(1)->in(0)->is_Allocate()) {
1912       some_allocations = true;
1913     } else if (n->is_CheckCastPP()) {
1914       wq.push(n->in(1));
1915     }
1916   }
1917   return some_allocations;
1918 }
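     // Example for the walk above (a sketch): a returned InlineTypePtr whose oop
     // input is a CheckCastPP of an Allocate projection, with no other uses along
     // the chain, answers true; any extra use of an intermediate node makes the
     // allocation independently live, so the answer is false.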
1919 
1920 void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
1921   // Make sure that the return value does not keep an otherwise unused allocation alive
1922   if (tf()->returns_inline_type_as_fields()) {
1923     Node* ret = NULL;
1924     for (uint i = 1; i < root()->req(); i++) {
1925       Node* in = root()->in(i);
1926       if (in->Opcode() == Op_Return) {
1927         assert(ret == NULL, "only one return");
1928         ret = in;
1929       }
1930     }
1931     if (ret != NULL) {
1932       Node* ret_val = ret->in(TypeFunc::Parms);
1933       if (igvn.type(ret_val)->isa_oopptr() &&
1934           return_val_keeps_allocations_alive(ret_val)) {
1935         igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
1936         assert(ret_val->outcnt() == 0, "should be dead now");
1937         igvn.remove_dead_node(ret_val);
1938       }
1939     }
1940   }
1941   if (_inline_type_nodes.length() == 0) {
1942     return;
1943   }
1944   // Scalarize inline types in safepoint debug info.
1945   // Delay this until all inlining is over to avoid getting inconsistent debug info.
1946   set_scalarize_in_safepoints(true);
1947   for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
1948     _inline_type_nodes.at(i)->as_InlineTypeBase()->make_scalar_in_safepoints(&igvn);
1949   }
1950   if (remove) {
1951     // Remove inline type nodes
1952     while (_inline_type_nodes.length() > 0) {
1953       InlineTypeBaseNode* vt = _inline_type_nodes.pop()->as_InlineTypeBase();
1954       if (vt->outcnt() == 0) {
1955         igvn.remove_dead_node(vt);
1956       } else if (vt->is_InlineTypePtr()) {
1957         igvn.replace_node(vt, vt->get_oop());
1958       } else {
1959         // Check if any users are blackholes. If so, rewrite them to use either the
1960         // allocated buffer, or individual components, instead of the inline type node
1961         // that goes away.
1962         for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
1963           if (vt->out(i)->is_Blackhole()) {
1964             BlackholeNode* bh = vt->out(i)->as_Blackhole();
1965 
1966             // Unlink the old input
1967             int idx = bh->find_edge(vt);
1968             assert(idx != -1, "The edge should be there");
1969             bh->del_req(idx);
1970             --i;
1971 
1972             if (vt->is_allocated(&igvn)) {
1973               // Already has the allocated instance, blackhole that
1974               bh->add_req(vt->get_oop());
1975             } else {
1976               // Not allocated yet, blackhole the components
1977               for (uint c = 0; c < vt->field_count(); c++) {
1978                 bh->add_req(vt->field_value(c));
1979               }
1980             }
1981 
1982             // Node modified, record for IGVN
1983             igvn.record_for_igvn(bh);
1984           }
1985         }
1986 
1987 #ifdef ASSERT
1988         for (DUIterator_Fast imax, i = vt->fast_outs(imax); i < imax; i++) {
1989           assert(vt->fast_out(i)->is_InlineTypeBase(), "Unexpected inline type user");
1990         }
1991 #endif
1992         igvn.replace_node(vt, igvn.C->top());
1993       }
1994     }
1995   }
1996   igvn.optimize();
1997 }
1998 
1999 void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
2000   if (!_has_flattened_accesses) {
2001     return;
2002   }
2003   // Initially, all flattened array accesses share the same slice to
2004   // keep dependencies with Object[] array accesses (that could be
2005   // to a flattened array) correct. We're done with parsing so we
2006   // now know all flattened array accesses in this compile
2007   // unit. Let's move flattened array accesses to their own slice,
2008   // one per element field. This should help memory access
2009   // optimizations.
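       // The rest of this function proceeds in three steps (summary of the code
       // below): (1) walk the graph and collect MergeMem nodes plus the memory
       // accesses still on the shared TypeAryPtr::INLINES slice, (2) invalidate
       // stale flat-array entries in the alias cache, and (3) starting from each
       // MergeMem, walk the memory chains with an explicit stack and rewire every
       // access, phi and membar onto its per-field alias slice.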
2010   ResourceMark rm;
2011   Unique_Node_List wq;
2012   wq.push(root());
2013 
2014   Node_List mergememnodes;
2015   Node_List memnodes;
2016 
2017   // Alias index currently shared by all flattened memory accesses
2018   int index = get_alias_index(TypeAryPtr::INLINES);
2019 
2020   // Find MergeMem nodes and flattened array accesses
2021   for (uint i = 0; i < wq.size(); i++) {
2022     Node* n = wq.at(i);
2023     if (n->is_Mem()) {
2024       const TypePtr* adr_type = NULL;
2025       if (n->Opcode() == Op_StoreCM) {
2026         adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
2027       } else {
2028         adr_type = get_adr_type(get_alias_index(n->adr_type()));
2029       }
2030       if (adr_type == TypeAryPtr::INLINES) {
2031         memnodes.push(n);
2032       }
2033     } else if (n->is_MergeMem()) {
2034       MergeMemNode* mm = n->as_MergeMem();
2035       if (mm->memory_at(index) != mm->base_memory()) {
2036         mergememnodes.push(n);
2037       }
2038     }
2039     for (uint j = 0; j < n->req(); j++) {
2040       Node* m = n->in(j);
2041       if (m != NULL) {
2042         wq.push(m);
2043       }
2044     }
2045   }
2046 
2047   if (memnodes.size() > 0) {
2048     _flattened_accesses_share_alias = false;
2049 
2050     // We are going to change the slice for the flattened array
2051     // accesses so we need to clear the cache entries that refer to
2052     // them.
2053     for (uint i = 0; i < AliasCacheSize; i++) {
2054       AliasCacheEntry* ace = &_alias_cache[i];
2055       if (ace->_adr_type != NULL &&
2056           ace->_adr_type->isa_aryptr() &&
2057           ace->_adr_type->is_aryptr()->is_flat()) {
2058         ace->_adr_type = NULL;
2059         ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the NULL adr_type resolves to AliasIdxTop
2060       }
2061     }
2062 
2063     // Find what aliases we are going to add
2064     int start_alias = num_alias_types()-1;
2065     int stop_alias = 0;
2066 
2067     for (uint i = 0; i < memnodes.size(); i++) {
2068       Node* m = memnodes.at(i);
2069       const TypePtr* adr_type = NULL;
2070       if (m->Opcode() == Op_StoreCM) {
2071         adr_type = m->in(MemNode::OopStore)->adr_type();
2072         if (adr_type != TypeAryPtr::INLINES) {
2073           // store was optimized out and we lost track of the adr_type
2074           Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
2075                                         m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
2076                                         get_alias_index(adr_type));
2077           igvn.register_new_node_with_optimizer(clone);
2078           igvn.replace_node(m, clone);
2079         }
2080       } else {
2081         adr_type = m->adr_type();
2082 #ifdef ASSERT
2083         m->as_Mem()->set_adr_type(adr_type);
2084 #endif
2085       }
2086       int idx = get_alias_index(adr_type);
2087       start_alias = MIN2(start_alias, idx);
2088       stop_alias = MAX2(stop_alias, idx);
2089     }
2090 
2091     assert(stop_alias >= start_alias, "should have expanded aliases");
2092 
2093     Node_Stack stack(0);
2094 #ifdef ASSERT
2095     VectorSet seen(Thread::current()->resource_area());
2096 #endif
2097     // Now let's fix the memory graph so each flattened array access
2098     // is moved to the right slice. Start from the MergeMem nodes.
2099     uint last = unique();
2100     for (uint i = 0; i < mergememnodes.size(); i++) {
2101       MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
2102       Node* n = current->memory_at(index);
2103       MergeMemNode* mm = NULL;
2104       do {
2105         // Follow memory edges through memory accesses, phis and
2106         // narrow membars and push nodes on the stack. Once we hit
2107         // bottom memory, we pop elements off the stack one at a
2108         // time, in reverse order, and move them to the right slice
2109         // by changing their memory edges.
2110         if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
2111           assert(!seen.test_set(n->_idx), "");
2112           // Uses (a load for instance) will need to be moved to the
2113           // right slice as well and will get a new memory state
2114           // that we don't know yet. The use could also be the
2115           // backedge of a loop. We put a place holder node between
2116           // the memory node and its uses. We replace that place
2117           // holder with the correct memory state once we know it,
2118           // i.e. when nodes are popped off the stack. Using the
2119         // place holder makes the logic work in the presence of
2120           // loops.
2121           if (n->outcnt() > 1) {
2122             Node* place_holder = NULL;
2123             assert(!n->has_out_with(Op_Node), "");
2124             for (DUIterator k = n->outs(); n->has_out(k); k++) {
2125               Node* u = n->out(k);
2126               if (u != current && u->_idx < last) {
2127                 bool success = false;
2128                 for (uint l = 0; l < u->req(); l++) {
2129                   if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
2130                     continue;
2131                   }
2132                   Node* in = u->in(l);
2133                   if (in == n) {
2134                     if (place_holder == NULL) {
2135                       place_holder = new Node(1);
2136                       place_holder->init_req(0, n);
2137                     }
2138                     igvn.replace_input_of(u, l, place_holder);
2139                     success = true;
2140                   }
2141                 }
2142                 if (success) {
2143                   --k;
2144                 }
2145               }
2146             }
2147           }
2148           if (n->is_Phi()) {
2149             stack.push(n, 1);
2150             n = n->in(1);
2151           } else if (n->is_Mem()) {
2152             stack.push(n, n->req());
2153             n = n->in(MemNode::Memory);
2154           } else {
2155             assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
2156             stack.push(n, n->req());
2157             n = n->in(0)->in(TypeFunc::Memory);
2158           }
2159         } else {
2160           assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
2161           // Build a new MergeMem node to carry the new memory state
2162           // as we build it. IGVN should fold extraneous MergeMem
2163           // nodes.
2164           mm = MergeMemNode::make(n);
2165           igvn.register_new_node_with_optimizer(mm);
2166           while (stack.size() > 0) {
2167             Node* m = stack.node();
2168             uint idx = stack.index();
2169             if (m->is_Mem()) {
2170               // Move memory node to its new slice
2171               const TypePtr* adr_type = m->adr_type();
2172               int alias = get_alias_index(adr_type);
2173               Node* prev = mm->memory_at(alias);
2174               igvn.replace_input_of(m, MemNode::Memory, prev);
2175               mm->set_memory_at(alias, m);
2176             } else if (m->is_Phi()) {
2177               // We need as many new phis as there are new aliases
2178               igvn.replace_input_of(m, idx, mm);
2179               if (idx == m->req()-1) {
2180                 Node* r = m->in(0);
2181                 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2182                   const Type* adr_type = get_adr_type(j);
2183                   if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
2184                     continue;
2185                   }
2186                   Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
2187                   igvn.register_new_node_with_optimizer(phi);
2188                   for (uint k = 1; k < m->req(); k++) {
2189                     phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
2190                   }
2191                   mm->set_memory_at(j, phi);
2192                 }
2193                 Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2194                 igvn.register_new_node_with_optimizer(base_phi);
2195                 for (uint k = 1; k < m->req(); k++) {
2196                   base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
2197                 }
2198                 mm->set_base_memory(base_phi);
2199               }
2200             } else {
2201               // This is a MemBarCPUOrder node from
2202               // Parse::array_load()/Parse::array_store(), in the
2203               // branch that handles flattened arrays hidden under
2204               // an Object[] array. We also need one new membar per
2205               // new alias to keep the unknown access that the
2206               // membars protect properly ordered with accesses to
2207             // known flattened arrays.
2208               assert(m->is_Proj(), "projection expected");
2209               Node* ctrl = m->in(0)->in(TypeFunc::Control);
2210               igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
2211               for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2212                 const Type* adr_type = get_adr_type(j);
2213                 if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
2214                   continue;
2215                 }
2216                 MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
2217                 igvn.register_new_node_with_optimizer(mb);
2218                 Node* mem = mm->memory_at(j);
2219                 mb->init_req(TypeFunc::Control, ctrl);
2220                 mb->init_req(TypeFunc::Memory, mem);
2221                 ctrl = new ProjNode(mb, TypeFunc::Control);
2222                 igvn.register_new_node_with_optimizer(ctrl);
2223                 mem = new ProjNode(mb, TypeFunc::Memory);
2224                 igvn.register_new_node_with_optimizer(mem);
2225                 mm->set_memory_at(j, mem);
2226               }
2227               igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
2228             }
2229             if (idx < m->req()-1) {
2230               idx += 1;
2231               stack.set_index(idx);
2232               n = m->in(idx);
2233               break;
2234             }
2235             // Take care of place holder nodes
2236             if (m->has_out_with(Op_Node)) {
2237               Node* place_holder = m->find_out_with(Op_Node);
2238               if (place_holder != NULL) {
2239                 Node* mm_clone = mm->clone();
2240                 igvn.register_new_node_with_optimizer(mm_clone);
2241                 Node* hook = new Node(1);
2242                 hook->init_req(0, mm);
2243                 igvn.replace_node(place_holder, mm_clone);
2244                 hook->destruct(&igvn);
2245               }
2246               assert(!m->has_out_with(Op_Node), "place holder should be gone now");
2247             }
2248             stack.pop();
2249           }
2250         }
2251       } while(stack.size() > 0);
2252       // Fix the memory state at the MergeMem we started from
2253       igvn.rehash_node_delayed(current);
2254       for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2255         const Type* adr_type = get_adr_type(j);
2256         if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
2257           continue;
2258         }
2259         current->set_memory_at(j, mm);
2260       }
2261       current->set_memory_at(index, current->base_memory());
2262     }
2263     igvn.optimize();
2264   }
2265   print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
2266 #ifdef ASSERT
2267   if (!_flattened_accesses_share_alias) {
2268     wq.clear();
2269     wq.push(root());
2270     for (uint i = 0; i < wq.size(); i++) {
2271       Node* n = wq.at(i);
2272       assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
2273       for (uint j = 0; j < n->req(); j++) {
2274         Node* m = n->in(j);
2275         if (m != NULL) {
2276           wq.push(m);
2277         }
2278       }
2279     }
2280   }
2281 #endif
2282 }
2283 
2284 
2285 // StringOpts and late inlining of string methods
2286 void Compile::inline_string_calls(bool parse_time) {
2287   {
2288     // remove useless nodes to make the usage analysis simpler
2289     ResourceMark rm;
2290     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2291   }
2292 
2293   {
2294     ResourceMark rm;
2295     print_method(PHASE_BEFORE_STRINGOPTS, 3);
2296     PhaseStringOpts pso(initial_gvn(), for_igvn());
2297     print_method(PHASE_AFTER_STRINGOPTS, 3);
2298   }
2299 
2300   // now inline anything that we skipped the first time around
2301   if (!parse_time) {
2302     _late_inlines_pos = _late_inlines.length();
2303   }
2304 

2454     assert(has_stringbuilder(), "inconsistent");
2455     for_igvn()->clear();
2456     initial_gvn()->replace_with(&igvn);
2457 
2458     inline_string_calls(false);
2459 
2460     if (failing())  return;
2461 
2462     inline_incrementally_cleanup(igvn);
2463   }
2464 
2465   set_inlining_incrementally(false);
2466 }
2467 
2468 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2469   // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2470   // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2471   // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
2472   // as if "inlining_incrementally() == true" were set.
2473   assert(inlining_incrementally() == false, "not allowed");
2474 #ifdef ASSERT
2475   Unique_Node_List* modified_nodes = _modified_nodes;
2476   _modified_nodes = NULL;
2477 #endif
2478   assert(_late_inlines.length() > 0, "sanity");
2479 
2480   while (_late_inlines.length() > 0) {
2481     for_igvn()->clear();
2482     initial_gvn()->replace_with(&igvn);
2483 
2484     while (inline_incrementally_one()) {
2485       assert(!failing(), "inconsistent");
2486     }
2487     if (failing())  return;
2488 
2489     inline_incrementally_cleanup(igvn);
2490   }
2491   DEBUG_ONLY( _modified_nodes = modified_nodes; )
2492 }
2493 
2494 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2495   if (_loop_opts_cnt > 0) {
2496     debug_only( int cnt = 0; );
2497     while (major_progress() && (_loop_opts_cnt > 0)) {
2498       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2499       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2500       PhaseIdealLoop::optimize(igvn, mode);
2501       _loop_opts_cnt--;
2502       if (failing())  return false;
2503       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2504     }
2505   }
2506   return true;
2507 }
2508 
2509 // Remove edges from "root" to each SafePoint at a backward branch.
2510 // They were inserted during parsing (see add_safepoint()) to make
2511 // infinite loops without calls or exceptions visible to root, i.e.,

2614   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2615     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2616     initial_gvn()->replace_with(&igvn);
2617     for_igvn()->clear();
2618     Unique_Node_List new_worklist(C->comp_arena());
2619     {
2620       ResourceMark rm;
2621       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2622     }
2623     Unique_Node_List* save_for_igvn = for_igvn();
2624     set_for_igvn(&new_worklist);
2625     igvn = PhaseIterGVN(initial_gvn());
2626     igvn.optimize();
2627     set_for_igvn(save_for_igvn);
2628   }
2629 
2630   // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2631   // safepoints
2632   remove_root_to_sfpts_edges(igvn);
2633 
2634   // Process inline type nodes now that all inlining is over
2635   process_inline_types(igvn);
2636 
2637   adjust_flattened_array_access_aliases(igvn);
2638 
2639   // Perform escape analysis
2640   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2641     if (has_loops()) {
2642       // Cleanup graph (remove dead nodes).
2643       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2644       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2645       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2646       if (failing())  return;
2647     }
2648     ConnectionGraph::do_analysis(this, &igvn);
2649 
2650     if (failing())  return;
2651 
2652     // Optimize out field loads from scalar replaceable allocations.
2653     igvn.optimize();
2654     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2655 
2656     if (failing())  return;
2657 
2658     if (congraph() != NULL && macro_count() > 0) {

2722   print_method(PHASE_ITER_GVN2, 2);
2723 
2724   if (failing())  return;
2725 
2726   // Loop transforms on the ideal graph.  Range Check Elimination,
2727   // peeling, unrolling, etc.
2728   if (!optimize_loops(igvn, LoopOptsDefault)) {
2729     return;
2730   }
2731 
2732   if (failing())  return;
2733 
2734   C->clear_major_progress(); // ensure that major progress is now clear
2735 
2736   process_for_post_loop_opts_igvn(igvn);
2737 
2738 #ifdef ASSERT
2739   bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2740 #endif
2741 
2742   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2743 
2744   if (_late_inlines.length() > 0) {
2745     // More opportunities to optimize virtual and MH calls.
2746     // Though it may be too late to perform inlining, strength-reducing them to direct calls is still an option.
2747     process_late_inline_calls_no_inline(igvn);
2748   }
2749 
2750   {
2751     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2752     PhaseMacroExpand  mex(igvn);
2753     if (mex.expand_macro_nodes()) {
2754       assert(failing(), "must bail out w/ explicit message");
2755       return;
2756     }
2757     print_method(PHASE_MACRO_EXPANSION, 2);
2758   }
2759 
2760   // Process inline type nodes again and remove them. From here
2761   // on we don't need to keep track of field values anymore.
2762   process_inline_types(igvn, /* remove= */ true);
2763 
2764   {
2765     TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2766     if (bs->expand_barriers(this, igvn)) {
2767       assert(failing(), "must bail out w/ explicit message");
2768       return;
2769     }
2770     print_method(PHASE_BARRIER_EXPANSION, 2);
2771   }
2772 
2773   if (C->max_vector_size() > 0) {
2774     C->optimize_logic_cones(igvn);
2775     igvn.optimize();
2776   }
2777 
2778   DEBUG_ONLY( _modified_nodes = NULL; )
2779 
2780   assert(igvn._worklist.size() == 0, "not empty");
2781   assert(_late_inlines.length() == 0, "missed optimization opportunity");







2782  } // (End scope of igvn; run destructor if necessary for asserts.)
2783 
2784  check_no_dead_use();
2785 
2786  process_print_inlining();
2787 
2788  // A method with only infinite loops has no edges entering loops from root
2789  {
2790    TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2791    if (final_graph_reshaping()) {
2792      assert(failing(), "must bail out w/ explicit message");
2793      return;
2794    }
2795  }
2796 
2797  print_method(PHASE_OPTIMIZE_FINISHED, 2);
2798  DEBUG_ONLY(set_phase_optimize_finished();)
2799 }
2800 
2801 #ifdef ASSERT

3335             // Accumulate any precedence edges
3336             if (mem->in(i) != NULL) {
3337               n->add_prec(mem->in(i));
3338             }
3339           }
3340           // Everything above this point has been processed.
3341           done = true;
3342         }
3343         // Eliminate the previous StoreCM
3344         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3345         assert(mem->outcnt() == 0, "should be dead");
3346         mem->disconnect_inputs(this);
3347       } else {
3348         prev = mem;
3349       }
3350       mem = prev->in(MemNode::Memory);
3351     }
3352   }
3353 }
3354 
3355 
3356 //------------------------------final_graph_reshaping_impl----------------------
3357 // Implement items 1-5 from final_graph_reshaping below.
3358 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
3359 
3360   if ( n->outcnt() == 0 ) return; // dead node
3361   uint nop = n->Opcode();
3362 
3363   // Check for 2-input instruction with "last use" on right input.
3364   // Swap to left input.  Implements item (2).
3365   if( n->req() == 3 &&          // two-input instruction
3366       n->in(1)->outcnt() > 1 && // left use is NOT a last use
3367       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not a data loop
3368       n->in(2)->outcnt() == 1 &&// right use IS a last use
3369       !n->in(2)->is_Con() ) {   // right use is not a constant
3370     // Check for commutative opcode
3371     switch( nop ) {
3372     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
3373     case Op_MaxI:  case Op_MaxL:  case Op_MaxF:  case Op_MaxD:
3374     case Op_MinI:  case Op_MinL:  case Op_MinF:  case Op_MinD:
3375     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:

3490       if (n->outcnt() > 1 &&
3491           !n->is_Proj() &&
3492           nop != Op_CreateEx &&
3493           nop != Op_CheckCastPP &&
3494           nop != Op_DecodeN &&
3495           nop != Op_DecodeNKlass &&
3496           !n->is_Mem() &&
3497           !n->is_Phi()) {
3498         Node *x = n->clone();
3499         call->set_req(TypeFunc::Parms, x);
3500       }
3501     }
3502     break;
3503   }
3504 
3505   case Op_StoreCM:
3506     {
3507       // Convert OopStore dependence into precedence edge
3508       Node* prec = n->in(MemNode::OopStore);
3509       n->del_req(MemNode::OopStore);
3510       if (prec->is_MergeMem()) {
3511         MergeMemNode* mm = prec->as_MergeMem();
3512         Node* base = mm->base_memory();
3513         for (int i = AliasIdxRaw + 1; i < num_alias_types(); i++) {
3514           const Type* adr_type = get_adr_type(i);
3515           if (adr_type->isa_aryptr() && adr_type->is_aryptr()->is_flat()) {
3516             Node* m = mm->memory_at(i);
3517             n->add_prec(m);
3518           }
3519         }
3520         if (mm->outcnt() == 0) {
3521           mm->disconnect_inputs(this);
3522         }
3523       } else {
3524         n->add_prec(prec);
3525       }
3526       eliminate_redundant_card_marks(n);
3527     }
3528 
3529     // fall through
3530 
3531   case Op_StoreB:
3532   case Op_StoreC:
3533   case Op_StorePConditional:
3534   case Op_StoreI:
3535   case Op_StoreL:
3536   case Op_StoreIConditional:
3537   case Op_StoreLConditional:
3538   case Op_CompareAndSwapB:
3539   case Op_CompareAndSwapS:
3540   case Op_CompareAndSwapI:
3541   case Op_CompareAndSwapL:
3542   case Op_CompareAndSwapP:
3543   case Op_CompareAndSwapN:
3544   case Op_WeakCompareAndSwapB:
3545   case Op_WeakCompareAndSwapS:

4077           // Replace all nodes whose edges are identical to m's with m
4078           k->subsume_by(m, this);
4079         }
4080       }
4081     }
4082     break;
4083   }
4084   case Op_CmpUL: {
4085     if (!Matcher::has_match_rule(Op_CmpUL)) {
4086       // No support for unsigned long comparisons
4087       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
4088       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
4089       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
4090       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
4091       Node* andl = new AndLNode(orl, remove_sign_mask);
4092       Node* cmp = new CmpLNode(andl, n->in(2));
4093       n->subsume_by(cmp, this);
4094     }
4095     break;
4096   }
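  // Editorial note (illustration only, not part of the original source): with a = n->in(1),
  // the chain above feeds the signed CmpL with (a >= 0) ? a : max_jlong, since
  //   a >> 63        is 0 for a >= 0 and -1 otherwise,
  //   a | -1         is -1, and
  //   -1 & max_jlong is max_jlong.
  // E.g. a = -1 (2^64-1 unsigned): (-1 >> 63) = -1, (-1 | -1) = -1, (-1 & max_jlong) = max_jlong.
  // Values with the sign bit set are therefore saturated to max_jlong before the signed compare.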
4097 #ifdef ASSERT
4098   case Op_InlineTypePtr:
4099   case Op_InlineType: {
4100     n->dump(-1);
4101     assert(false, "inline type node was not removed");
4102     break;
4103   }
4104 #endif
4105   default:
4106     assert(!n->is_Call(), "");
4107     assert(!n->is_Mem(), "");
4108     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
4109     break;
4110   }
4111 }
4112 
4113 //------------------------------final_graph_reshaping_walk---------------------
4114 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
4115 // requires that the walk visits a node's inputs before visiting the node.
4116 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
4117   Unique_Node_List sfpt;
4118 
4119   frc._visited.set(root->_idx); // first, mark node as visited
4120   uint cnt = root->req();
4121   Node *n = root;
4122   uint  i = 0;
4123   while (true) {
4124     if (i < cnt) {

4432   }
4433 }
4434 
4435 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4436   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4437 }
4438 
4439 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4440   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4441 }
4442 
4443 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4444   if (holder->is_initialized()) {
4445     return false;
4446   }
4447   if (holder->is_being_initialized()) {
4448     if (accessing_method->holder() == holder) {
4449       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4450       // <init>, or a static method. In all those cases, an initialization barrier on the
4451       // holder klass has already been passed.
4452       if (accessing_method->is_class_initializer() ||
4453           accessing_method->is_object_constructor() ||
4454           accessing_method->is_static()) {
4455         return false;
4456       }
4457     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4458       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4459       // In case of <init> or a static method, a barrier on the subclass is not enough: the
4460       // child class can become fully initialized while its parent class is still being initialized.
4461       if (accessing_method->is_class_initializer()) {
4462         return false;
4463       }
4464     }
4465     ciMethod* root = method(); // the root method of compilation
4466     if (root != accessing_method) {
4467       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4468     }
4469   }
4470   return true;
4471 }
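// Editorial example (hypothetical classes, for illustration only): let B extend A, and let the
// holder A still be initializing. An access in B::<clinit> can skip the barrier, because B's
// <clinit> only runs once A's initialization has completed or is already in progress on the
// same thread. An access in an ordinary static method B::m() cannot skip it: B may already be
// fully initialized (and B::m() callable) while A is still being initialized, e.g. when A's
// <clinit> triggered B's initialization. A caller guarding a static field access might combine
// the overloads above roughly as follows (sketch, not code from this file):
//   if (needs_clinit_barrier(field, method())) {
//     // ... emit an initialization barrier for field->holder() before the access ...
//   }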
4472 
4473 #ifndef PRODUCT
4474 //------------------------------verify_graph_edges---------------------------
4475 // Walk the Graph and verify that there is a one-to-one correspondence
4476 // between Use-Def edges and Def-Use edges in the graph.
4477 void Compile::verify_graph_edges(bool no_dead_code) {
4478   if (VerifyGraphEdges) {
4479     Unique_Node_List visited;
4480     // Call recursive graph walk to check edges
4481     _root->verify_edges(visited);

4562                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
4563   }
4564 
4565   if (VerifyIdealNodeCount) {
4566     Compile::current()->print_missing_nodes();
4567   }
4568 #endif
4569 
4570   if (_log != NULL) {
4571     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4572   }
4573 }
4574 
4575 //----------------------------static_subtype_check-----------------------------
4576 // Shortcut important common cases when superklass is exact:
4577 // (0) superklass is java.lang.Object (can occur in reflective code)
4578 // (1) subklass is already limited to a subtype of superklass => always ok
4579 // (2) subklass does not overlap with superklass => always fail
4580 // (3) superklass has NO subtypes and we can check with a simple compare.
4581 int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
4582   if (StressReflectiveCode || superk == NULL || subk == NULL) {
4583     return SSC_full_test;       // Let caller generate the general case.
4584   }
4585 
4586   if (superk == env()->Object_klass()) {
4587     return SSC_always_true;     // (0) this test cannot fail
4588   }
4589 
4590   ciType* superelem = superk;
4591   ciType* subelem = subk;
4592   if (superelem->is_array_klass()) {
4593     superelem = superelem->as_array_klass()->base_element_type();
4594   }
4595   if (subelem->is_array_klass()) {
4596     subelem = subelem->as_array_klass()->base_element_type();
4597   }
4598 
4599   if (!subk->is_interface()) {  // cannot trust static interface types yet
4600     if (subk->is_subtype_of(superk)) {
4601       return SSC_always_true;   // (1) false path dead; no dynamic test needed
4602     }
4603     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
4604         !(subelem->is_klass() && subelem->as_klass()->is_interface()) &&
4605         !superk->is_subtype_of(subk)) {
4606       return SSC_always_false;  // (2) true path dead; no dynamic test needed
4607     }
4608   }
4609 
4610   // Do not fold the subtype check to an array klass pointer comparison for [V? arrays.
4611   // [QMyValue is a subtype of [LMyValue but the klass for [QMyValue is not equal to
4612   // the klass for [LMyValue. Perform a full test.
4613   if (superk->is_obj_array_klass() && !superk->as_array_klass()->is_elem_null_free() &&
4614       superk->as_array_klass()->element_klass()->is_inlinetype()) {
4615     return SSC_full_test;
4616   }
4617   // If casting to an instance klass, it must have no subtypes
4618   if (superk->is_interface()) {
4619     // Cannot trust interfaces yet.
4620     // %%% S.B. superk->nof_implementors() == 1
4621   } else if (superelem->is_instance_klass()) {
4622     ciInstanceKlass* ik = superelem->as_instance_klass();
4623     if (!ik->has_subklass() && !ik->is_interface()) {
4624       if (!ik->is_final()) {
4625         // Add a dependency if there is a chance of a later subclass.
4626         dependencies()->assert_leaf_type(ik);
4627       }
4628       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
4629     }
4630   } else {
4631     // A primitive array type has no subtypes.
4632     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
4633   }
4634 
4635   return SSC_full_test;
4636 }
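// Editorial examples (well-known JDK classes, for illustration only) of how the cases above
// resolve, assuming exact, non-null klasses and StressReflectiveCode off:
//   superk = java.lang.Object,     subk = anything -> SSC_always_true  (case 0)
//   superk = Number,               subk = Integer  -> SSC_always_true  (case 1: already a subtype)
//   superk = String,               subk = Integer  -> SSC_always_false (case 2: no overlap)
//   superk = String,               subk = Object   -> SSC_easy_test    (case 3: String is final, no subklass)
//   superk = int[],                subk = Object   -> SSC_easy_test    (case 3: primitive arrays have no subtypes)
//   superk = Runnable (interface), subk = Object   -> SSC_full_test    (interfaces are not trusted)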

5128       const Type* t = igvn.type_or_null(n);
5129       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
5130       if (n->is_Type()) {
5131         t = n->as_Type()->type();
5132         assert(t == t->remove_speculative(), "no more speculative types");
5133       }
5134       // Iterate over outs - endless loops are unreachable from below
5135       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5136         Node *m = n->fast_out(i);
5137         if (not_a_node(m)) {
5138           continue;
5139         }
5140         worklist.push(m);
5141       }
5142     }
5143     igvn.check_no_speculative_types();
5144 #endif
5145   }
5146 }
5147 
5148 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
5149   const TypeInstPtr* ta = phase->type(a)->isa_instptr();
5150   const TypeInstPtr* tb = phase->type(b)->isa_instptr();
5151   if (!EnableValhalla || ta == NULL || tb == NULL ||
5152       ta->is_zero_type() || tb->is_zero_type() ||
5153       !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
5154     // Use old acmp if one operand is null or not an inline type
5155     return new CmpPNode(a, b);
5156   } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
5157     // We know that one operand is an inline type. Therefore,
5158     // new acmp will only return true if both operands are NULL.
5159     // Check if both operands are null by or'ing the oops.
5160     a = phase->transform(new CastP2XNode(NULL, a));
5161     b = phase->transform(new CastP2XNode(NULL, b));
5162     a = phase->transform(new OrXNode(a, b));
5163     return new CmpXNode(a, phase->MakeConX(0));
5164   }
5165   // Use new acmp
5166   return NULL;
5167 }
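// Editorial sketch (illustration only, taking the comment above as given): after CastP2X the
// two oops are plain machine words, so "both operands are NULL" collapses into a single compare:
//   a == NULL && b == NULL   <==>   ((uintptr_t)a | (uintptr_t)b) == 0
// e.g. a = 0x0,        b = 0x0 -> 0x0 | 0x0 = 0x0 (CmpX against 0 reports equal)
//      a = 0x7f80a1c0, b = 0x0 -> nonzero         (CmpX against 0 reports not equal)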
5168 
5169 // Auxiliary methods to support randomized stressing/fuzzing.
5170 
5171 int Compile::random() {
5172   _stress_seed = os::next_random(_stress_seed);
5173   return static_cast<int>(_stress_seed);
5174 }
5175 
5176 // This method can be called an arbitrary number of times, with the current count
5177 // as the argument. The logic allows selecting a single candidate from the
5178 // running list of candidates as follows:
5179 //    int count = 0;
5180 //    Cand* selected = null;
5181 //    while(cand = cand->next()) {
5182 //      if (randomized_select(++count)) {
5183 //        selected = cand;
5184 //      }
5185 //    }
5186 //
5187 // Including count equalizes the chances any candidate is "selected".
5188 // This is useful when we don't have the complete list of candidates to choose
5189 // from uniformly.
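// Editorial note (illustration only, not part of the original source): assuming
// randomized_select(count) returns true with probability 1/count, the loop above is
// one-element reservoir sampling. Candidate i of n ends up selected iff it is picked at
// step i and never replaced afterwards:
//   P(selected == cand_i) = (1/i) * (i/(i+1)) * ((i+1)/(i+2)) * ... * ((n-1)/n) = 1/n
// so each candidate is chosen with equal probability even though n is not known in advance.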