36 #include "gc/shared/barrierSet.hpp"
37 #include "gc/shared/c2/barrierSetC2.hpp"
38 #include "jfr/jfrEvents.hpp"
39 #include "jvm_io.h"
40 #include "memory/allocation.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "opto/addnode.hpp"
43 #include "opto/block.hpp"
44 #include "opto/c2compiler.hpp"
45 #include "opto/callGenerator.hpp"
46 #include "opto/callnode.hpp"
47 #include "opto/castnode.hpp"
48 #include "opto/cfgnode.hpp"
49 #include "opto/chaitin.hpp"
50 #include "opto/compile.hpp"
51 #include "opto/connode.hpp"
52 #include "opto/convertnode.hpp"
53 #include "opto/divnode.hpp"
54 #include "opto/escape.hpp"
55 #include "opto/idealGraphPrinter.hpp"
56 #include "opto/loopnode.hpp"
57 #include "opto/machnode.hpp"
58 #include "opto/macro.hpp"
59 #include "opto/matcher.hpp"
60 #include "opto/mathexactnode.hpp"
61 #include "opto/memnode.hpp"
62 #include "opto/mulnode.hpp"
63 #include "opto/narrowptrnode.hpp"
64 #include "opto/node.hpp"
65 #include "opto/opcodes.hpp"
66 #include "opto/output.hpp"
67 #include "opto/parse.hpp"
68 #include "opto/phaseX.hpp"
69 #include "opto/rootnode.hpp"
70 #include "opto/runtime.hpp"
71 #include "opto/stringopts.hpp"
72 #include "opto/type.hpp"
73 #include "opto/vector.hpp"
74 #include "opto/vectornode.hpp"
75 #include "runtime/globals_extension.hpp"
375 // A constant node that has no out-edges and only one in-edge from
376 // root is usually dead. However, sometimes the reshaping walk makes
377 // it reachable again by adding use edges. So, we will NOT count Con nodes
378 // as dead, to be conservative about the dead node count at any
379 // given time.
380 if (!dead->is_Con()) {
381 record_dead_node(dead->_idx);
382 }
383 if (dead->is_macro()) {
384 remove_macro_node(dead);
385 }
386 if (dead->is_expensive()) {
387 remove_expensive_node(dead);
388 }
389 if (dead->Opcode() == Op_Opaque4) {
390 remove_skeleton_predicate_opaq(dead);
391 }
392 if (dead->for_post_loop_opts_igvn()) {
393 remove_from_post_loop_opts_igvn(dead);
394 }
395 if (dead->is_Call()) {
396 remove_useless_late_inlines( &_late_inlines, dead);
397 remove_useless_late_inlines( &_string_late_inlines, dead);
398 remove_useless_late_inlines( &_boxing_late_inlines, dead);
399 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
400
401 if (dead->is_CallStaticJava()) {
402 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
403 }
404 }
405 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
406 bs->unregister_potential_barrier_node(dead);
407 }
408
409 // Disconnect all useless nodes by disconnecting those at the boundary.
410 void Compile::disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_List* worklist) {
411 uint next = 0;
412 while (next < useful.size()) {
413 Node *n = useful.at(next++);
414 if (n->is_SafePoint()) {
415 // We're done with a parsing phase. Replaced nodes are not valid
416 // beyond that point.
417 n->as_SafePoint()->delete_replaced_nodes();
418 }
419 // Use raw traversal of out edges since this code removes out edges
420 int max = n->outcnt();
421 for (int j = 0; j < max; ++j) {
422 Node* child = n->raw_out(j);
423 if (!useful.member(child)) {
424 assert(!child->is_top() || child != top(),
425 "If top is cached in Compile object it is in useful list");
426 // Only need to remove this out-edge to the useless node
427 n->raw_del_out(j);
428 --j; // raw_del_out() fills slot j with the last out-edge, so revisit slot j
429 --max; // and shrink the scan bound accordingly
430 }
431 }
432 if (n->outcnt() == 1 && n->has_special_unique_user()) {
433 worklist->push(n->unique_out());
434 }
435 }
436
437 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
438 remove_useless_nodes(_predicate_opaqs, useful); // remove useless predicate opaque nodes
439 remove_useless_nodes(_skeleton_predicate_opaqs, useful);
440 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
441 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
442 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
443 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
444 #ifdef ASSERT
445 if (_modified_nodes != NULL) {
446 _modified_nodes->remove_useless_nodes(useful.member_set());
447 }
448 #endif
449
450 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
451 bs->eliminate_useless_gc_barriers(useful, this);
452 // clean up the late inline lists
453 remove_useless_late_inlines( &_late_inlines, useful);
454 remove_useless_late_inlines( &_string_late_inlines, useful);
455 remove_useless_late_inlines( &_boxing_late_inlines, useful);
456 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
457 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
458 }
459
460 // ============================================================================
461 //------------------------------CompileWrapper---------------------------------
603 _has_reserved_stack_access(target->has_reserved_stack_access()),
604 #ifndef PRODUCT
605 _igv_idx(0),
606 _trace_opto_output(directive->TraceOptoOutputOption),
607 #endif
608 _has_method_handle_invokes(false),
609 _clinit_barrier_on_entry(false),
610 _stress_seed(0),
611 _comp_arena(mtCompiler),
612 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
613 _env(ci_env),
614 _directive(directive),
615 _log(ci_env->log()),
616 _failure_reason(NULL),
617 _intrinsics (comp_arena(), 0, 0, NULL),
618 _macro_nodes (comp_arena(), 8, 0, NULL),
619 _predicate_opaqs (comp_arena(), 8, 0, NULL),
620 _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
621 _expensive_nodes (comp_arena(), 8, 0, NULL),
622 _for_post_loop_igvn(comp_arena(), 8, 0, NULL),
623 _unstable_if_traps (comp_arena(), 8, 0, NULL),
624 _coarsened_locks (comp_arena(), 8, 0, NULL),
625 _congraph(NULL),
626 NOT_PRODUCT(_igv_printer(NULL) COMMA)
627 _dead_node_list(comp_arena()),
628 _dead_node_count(0),
629 _node_arena(mtCompiler),
630 _old_arena(mtCompiler),
631 _mach_constant_base_node(NULL),
632 _Compile_types(mtCompiler),
633 _initial_gvn(NULL),
634 _for_igvn(NULL),
635 _late_inlines(comp_arena(), 2, 0, NULL),
636 _string_late_inlines(comp_arena(), 2, 0, NULL),
637 _boxing_late_inlines(comp_arena(), 2, 0, NULL),
638 _vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
639 _late_inlines_pos(0),
640 _number_of_mh_late_inlines(0),
641 _print_inlining_stream(new (mtCompiler) stringStream()),
642 _print_inlining_list(NULL),
707 // Node list that Iterative GVN will start with
708 Unique_Node_List for_igvn(comp_arena());
709 set_for_igvn(&for_igvn);
710
711 // GVN that will be run immediately on new nodes
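// (The GVN hash table is sized from the bytecode size, roughly four nodes per
// bytecode, with MINIMUM_NODE_HASH as the lower bound.)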
712 uint estimated_size = method()->code_size()*4+64;
713 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
714 PhaseGVN gvn(node_arena(), estimated_size);
715 set_initial_gvn(&gvn);
716
717 print_inlining_init();
718 { // Scope for timing the parser
719 TracePhase tp("parse", &timers[_t_parser]);
720
721 // Put top into the hash table ASAP.
722 initial_gvn()->transform_no_reclaim(top());
723
724 // Set up tf(), start(), and find a CallGenerator.
725 CallGenerator* cg = NULL;
726 if (is_osr_compilation()) {
727 const TypeTuple *domain = StartOSRNode::osr_domain();
728 const TypeTuple *range = TypeTuple::make_range(method()->signature());
729 init_tf(TypeFunc::make(domain, range));
730 StartNode* s = new StartOSRNode(root(), domain);
731 initial_gvn()->set_type_bottom(s);
732 init_start(s);
733 cg = CallGenerator::for_osr(method(), entry_bci());
734 } else {
735 // Normal case.
736 init_tf(TypeFunc::make(method()));
737 StartNode* s = new StartNode(root(), tf()->domain());
738 initial_gvn()->set_type_bottom(s);
739 init_start(s);
740 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
741 // With java.lang.ref.Reference.get() we must go through the
742 // intrinsic - even when get() is the root
743 // method of the compile - so that, if necessary, the value in
744 // the referent field of the reference object gets recorded by
745 // the pre-barrier code.
746 cg = find_intrinsic(method(), false);
747 }
748 if (cg == NULL) {
749 float past_uses = method()->interpreter_invocation_count();
750 float expected_uses = past_uses;
751 cg = CallGenerator::for_inline(method(), expected_uses);
752 }
753 }
754 if (failing()) return;
755 if (cg == NULL) {
756 record_method_not_compilable("cannot parse method");
757 return;
836 print_ideal_ir("print_ideal");
837 }
838 #endif
839
840 #ifdef ASSERT
841 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
842 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
843 #endif
844
845 // Dump compilation data to replay it.
846 if (directive->DumpReplayOption) {
847 env()->dump_replay_data(_compile_id);
848 }
849 if (directive->DumpInlineOption && (ilt() != NULL)) {
850 env()->dump_inline_data(_compile_id);
851 }
852
853 // Now that we know the size of all the monitors we can add a fixed slot
854 // for the original deopt pc.
855 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
856 set_fixed_slots(next_slot);
857
858 // Compute when to use implicit null checks. Used by matching trap based
859 // nodes and NullCheck optimization.
860 set_allowed_deopt_reasons();
861
862 // Now generate code
863 Code_Gen();
864 }
865
866 //------------------------------Compile----------------------------------------
867 // Compile a runtime stub
868 Compile::Compile( ciEnv* ci_env,
869 TypeFunc_generator generator,
870 address stub_function,
871 const char *stub_name,
872 int is_fancy_jump,
873 bool pass_tls,
874 bool return_pc,
875 DirectiveSet* directive)
988 // Create Debug Information Recorder to record scopes, oopmaps, etc.
989 env()->set_oop_recorder(new OopRecorder(env()->arena()));
990 env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
991 env()->set_dependencies(new Dependencies(env()));
992
993 _fixed_slots = 0;
994 set_has_split_ifs(false);
995 set_has_loops(false); // first approximation
996 set_has_stringbuilder(false);
997 set_has_boxed_value(false);
998 _trap_can_recompile = false; // no traps emitted yet
999 _major_progress = true; // start out assuming good things will happen
1000 set_has_unsafe_access(false);
1001 set_max_vector_size(0);
1002 set_clear_upper_avx(false); // false by default for clearing the upper bits of ymm registers
1003 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1004 set_decompile_count(0);
1005
1006 set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1007 _loop_opts_cnt = LoopOptsCount;
1008 set_do_inlining(Inline);
1009 set_max_inline_size(MaxInlineSize);
1010 set_freq_inline_size(FreqInlineSize);
1011 set_do_scheduling(OptoScheduling);
1012
1013 set_do_vector_loop(false);
1014 set_has_monitors(false);
1015
1016 if (AllowVectorizeOnDemand) {
1017 if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
1018 set_do_vector_loop(true);
1019 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1020 } else if (has_method() && method()->name() != 0 &&
1021 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1022 set_do_vector_loop(true);
1023 }
1024 }
1025 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider having do_vector_loop() mandate use_cmove unconditionally
1026 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1027
1282 // If this method has already trapped on a range check,
1283 // assume it was because we already tried range smearing
1284 // and it failed.
1285 uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1286 return !already_trapped;
1287 }
1288
1289
1290 //------------------------------flatten_alias_type-----------------------------
1291 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1292 assert(do_aliasing(), "Aliasing should be enabled");
1293 int offset = tj->offset();
1294 TypePtr::PTR ptr = tj->ptr();
1295
1296 // Known instance (scalarizable allocation) alias only with itself.
1297 bool is_known_inst = tj->isa_oopptr() != NULL &&
1298 tj->is_oopptr()->is_known_instance();
1299
1300 // Process weird unsafe references.
1301 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1302 assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");
1303 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1304 tj = TypeOopPtr::BOTTOM;
1305 ptr = tj->ptr();
1306 offset = tj->offset();
1307 }
1308
1309 // Array pointers need some flattening
1310 const TypeAryPtr* ta = tj->isa_aryptr();
1311 if (ta && ta->is_stable()) {
1312 // Erase stability property for alias analysis.
1313 tj = ta = ta->cast_to_stable(false);
1314 }
1315 if( ta && is_known_inst ) {
1316 if ( offset != Type::OffsetBot &&
1317 offset > arrayOopDesc::length_offset_in_bytes() ) {
1318 offset = Type::OffsetBot; // Flatten constant access into array body only
1319 tj = ta = ta->
1320 remove_speculative()->
1321 cast_to_ptr_type(ptr)->
1322 with_offset(offset);
1323 }
1324 } else if (ta) {
1325 // For arrays indexed by constant indices, we flatten the alias
1326 // space to include all of the array body. Only the header, klass
1327 // and array length can be accessed un-aliased.
1328 if( offset != Type::OffsetBot ) {
1329 if( ta->const_oop() ) { // MethodData* or Method*
1330 offset = Type::OffsetBot; // Flatten constant access into array body
1331 tj = ta = ta->
1332 remove_speculative()->
1333 cast_to_ptr_type(ptr)->
1334 cast_to_exactness(false)->
1335 with_offset(offset);
1336 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1337 // range is OK as-is.
1338 tj = ta = TypeAryPtr::RANGE;
1339 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1340 tj = TypeInstPtr::KLASS; // all klass loads look alike
1341 ta = TypeAryPtr::RANGE; // generic ignored junk
1342 ptr = TypePtr::BotPTR;
1343 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1344 tj = TypeInstPtr::MARK;
1345 ta = TypeAryPtr::RANGE; // generic ignored junk
1346 ptr = TypePtr::BotPTR;
1347 } else { // Random constant offset into array body
1348 offset = Type::OffsetBot; // Flatten constant access into array body
1349 tj = ta = ta->
1350 remove_speculative()->
1351 cast_to_ptr_type(ptr)->
1352 cast_to_exactness(false)->
1353 with_offset(offset);
1354 }
1355 }
1356 // Arrays of fixed size alias with arrays of unknown size.
1357 if (ta->size() != TypeInt::POS) {
1358 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1359 tj = ta = ta->
1360 remove_speculative()->
1361 cast_to_ptr_type(ptr)->
1362 with_ary(tary)->
1363 cast_to_exactness(false);
1364 }
1365 // Arrays of known objects become arrays of unknown objects.
1366 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1367 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1368 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1369 }
1370 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1371 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1372 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1373 }
1374 // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1375 // cannot be distinguished by bytecode alone.
1376 if (ta->elem() == TypeInt::BOOL) {
1377 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1378 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1379 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1380 }
1381 // During the 2nd round of IterGVN, NotNull castings are removed.
1382 // Make sure the Bottom and NotNull variants alias the same.
1383 // Also, make sure exact and non-exact variants alias the same.
1384 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1385 tj = ta = ta->
1386 remove_speculative()->
1387 cast_to_ptr_type(TypePtr::BotPTR)->
1388 cast_to_exactness(false)->
1389 with_offset(offset);
1390 }
1391 }
1392
1393 // Oop pointers need some flattening
1394 const TypeInstPtr *to = tj->isa_instptr();
1395 if (to && to != TypeOopPtr::BOTTOM) {
1396 ciInstanceKlass* ik = to->instance_klass();
1397 if( ptr == TypePtr::Constant ) {
1398 if (ik != ciEnv::current()->Class_klass() ||
1399 offset < ik->layout_helper_size_in_bytes()) {
1409 } else if( is_known_inst ) {
1410 tj = to; // Keep NotNull and klass_is_exact for instance type
1411 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1412 // During the 2nd round of IterGVN, NotNull castings are removed.
1413 // Make sure the Bottom and NotNull variants alias the same.
1414 // Also, make sure exact and non-exact variants alias the same.
1415 tj = to = to->
1416 remove_speculative()->
1417 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1418 cast_to_ptr_type(TypePtr::BotPTR)->
1419 cast_to_exactness(false);
1420 }
1421 if (to->speculative() != NULL) {
1422 tj = to = to->remove_speculative();
1423 }
1424 // Canonicalize the holder of this field
1425 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1426 // First handle header references such as a LoadKlassNode, even if the
1427 // object's klass is unloaded at compile time (4965979).
1428 if (!is_known_inst) { // Do it only for non-instance types
1429 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1430 }
1431 } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1432 // Static fields are in the space above the normal instance
1433 // fields in the java.lang.Class instance.
1434 if (ik != ciEnv::current()->Class_klass()) {
1435 to = NULL;
1436 tj = TypeOopPtr::BOTTOM;
1437 offset = tj->offset();
1438 }
1439 } else {
1440 ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1441 assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1442 if (!ik->equals(canonical_holder) || tj->offset() != offset) {
1443 if( is_known_inst ) {
1444 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1445 } else {
1446 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1447 }
1448 }
1449 }
1450 }
1451
1452 // Klass pointers to object array klasses need some flattening
1453 const TypeKlassPtr *tk = tj->isa_klassptr();
1454 if( tk ) {
1455 // If we are referencing a field within a Klass, we need
1456 // to assume the worst case of an Object. Both exact and
1457 // inexact types must flatten to the same alias class so
1458 // use NotNull as the PTR.
1459 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1460 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1461 env()->Object_klass(),
1462 offset);
1463 }
1464
1465 if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1466 ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1467 if (!k || !k->is_loaded()) { // Only fails for some -Xcomp runs
1468 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), offset);
1469 } else {
1470 tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, offset);
1471 }
1472 }
1473
1474 // Check for precise loads from the primary supertype array and force them
1475 // to the supertype cache alias index. Check for generic array loads from
1476 // the primary supertype array and also force them to the supertype cache
1477 // alias index. Since the same load can reach both, we need to merge
1478 // these 2 disparate memories into the same alias class. Since the
1479 // primary supertype array is read-only, there's no chance of confusion
1480 // where we bypass an array load and an array store.
1481 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1482 if (offset == Type::OffsetBot ||
1483 (offset >= primary_supers_offset &&
1484 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1485 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1486 offset = in_bytes(Klass::secondary_super_cache_offset());
1487 tj = tk = tk->with_offset(offset);
1488 }
1489 }
1490
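// Hash the address-type pointer into the fixed-size, direct-mapped alias cache
// (2^logAliasCacheSize entries); find_alias_type() checks the returned entry
// before falling back to the slow flatten/search path.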
1583 intptr_t key = (intptr_t) adr_type;
1584 key ^= key >> logAliasCacheSize;
1585 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1586 }
1587
1588
1589 //-----------------------------grow_alias_types--------------------------------
1590 void Compile::grow_alias_types() {
1591 const int old_ats = _max_alias_types; // how many before?
1592 const int new_ats = old_ats; // how many more?
1593 const int grow_ats = old_ats+new_ats; // how many now?
1594 _max_alias_types = grow_ats;
1595 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1596 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1597 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1598 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1599 }
1600
1601
1602 //--------------------------------find_alias_type------------------------------
1603 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1604 if (!do_aliasing()) {
1605 return alias_type(AliasIdxBot);
1606 }
1607
1608 AliasCacheEntry* ace = probe_alias_cache(adr_type);
1609 if (ace->_adr_type == adr_type) {
1610 return alias_type(ace->_index);
1611 }
1612
1613 // Handle special cases.
1614 if (adr_type == NULL) return alias_type(AliasIdxTop);
1615 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1616
1617 // Do it the slow way.
1618 const TypePtr* flat = flatten_alias_type(adr_type);
1619
1620 #ifdef ASSERT
1621 {
1622 ResourceMark rm;
1623 assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1624 Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1625 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1626 Type::str(adr_type));
1627 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1628 const TypeOopPtr* foop = flat->is_oopptr();
1629 // Scalarizable allocations have exact klass always.
1630 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1640 if (alias_type(i)->adr_type() == flat) {
1641 idx = i;
1642 break;
1643 }
1644 }
1645
1646 if (idx == AliasIdxTop) {
1647 if (no_create) return NULL;
1648 // Grow the array if necessary.
1649 if (_num_alias_types == _max_alias_types) grow_alias_types();
1650 // Add a new alias type.
1651 idx = _num_alias_types++;
1652 _alias_types[idx]->Init(idx, flat);
1653 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1654 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1655 if (flat->isa_instptr()) {
1656 if (flat->offset() == java_lang_Class::klass_offset()
1657 && flat->is_instptr()->instance_klass() == env()->Class_klass())
1658 alias_type(idx)->set_rewritable(false);
1659 }
1660 if (flat->isa_aryptr()) {
1661 #ifdef ASSERT
1662 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1663 // (T_BYTE has the weakest alignment and size restrictions...)
1664 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1665 #endif
1666 if (flat->offset() == TypePtr::OffsetBot) {
1667 alias_type(idx)->set_element(flat->is_aryptr()->elem());
1668 }
1669 }
1670 if (flat->isa_klassptr()) {
1671 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1672 alias_type(idx)->set_rewritable(false);
1673 if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1674 alias_type(idx)->set_rewritable(false);
1675 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1676 alias_type(idx)->set_rewritable(false);
1677 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1678 alias_type(idx)->set_rewritable(false);
1679 if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1680 alias_type(idx)->set_rewritable(false);
1681 }
1682 // %%% (We would like to finalize JavaThread::threadObj_offset(),
1683 // but the base pointer type is not distinctive enough to identify
1684 // references into JavaThread.)
1685
1686 // Check for final fields.
1687 const TypeInstPtr* tinst = flat->isa_instptr();
1688 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1689 ciField* field;
1690 if (tinst->const_oop() != NULL &&
1691 tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1692 tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1693 // static field
1694 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1695 field = k->get_field_by_offset(tinst->offset(), true);
1696 } else {
1697 ciInstanceKlass *k = tinst->instance_klass();
1698 field = k->get_field_by_offset(tinst->offset(), false);
1699 }
1700 assert(field == NULL ||
1701 original_field == NULL ||
1702 (field->holder() == original_field->holder() &&
1703 field->offset() == original_field->offset() &&
1704 field->is_static() == original_field->is_static()), "wrong field?");
1705 // Set field() and is_rewritable() attributes.
1706 if (field != NULL) alias_type(idx)->set_field(field);
1707 }
1708 }
1709
1710 // Fill the cache for next time.
1711 ace->_adr_type = adr_type;
1712 ace->_index = idx;
1713 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1714
1715 // Might as well try to fill the cache for the flattened version, too.
1716 AliasCacheEntry* face = probe_alias_cache(flat);
1717 if (face->_adr_type == NULL) {
1718 face->_adr_type = flat;
1719 face->_index = idx;
1720 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1721 }
1722
1723 return alias_type(idx);
1724 }
1725
1726
1727 Compile::AliasType* Compile::alias_type(ciField* field) {
1728 const TypeOopPtr* t;
1729 if (field->is_static())
1730 t = TypeInstPtr::make(field->holder()->java_mirror());
1731 else
1732 t = TypeOopPtr::make_from_klass_raw(field->holder());
1733 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1734 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1735 return atp;
1736 }
1737
1738
1739 //------------------------------have_alias_type--------------------------------
1740 bool Compile::have_alias_type(const TypePtr* adr_type) {
1817 C->set_post_loop_opts_phase(); // no more loop opts allowed
1818
1819 assert(!C->major_progress(), "not cleared");
1820
1821 if (_for_post_loop_igvn.length() > 0) {
1822 while (_for_post_loop_igvn.length() > 0) {
1823 Node* n = _for_post_loop_igvn.pop();
1824 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1825 igvn._worklist.push(n);
1826 }
1827 igvn.optimize();
1828 assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1829
1830 // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1831 if (C->major_progress()) {
1832 C->clear_major_progress(); // ensure that major progress is now clear
1833 }
1834 }
1835 }
1836
1837 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
1838 if (OptimizeUnstableIf) {
1839 _unstable_if_traps.append(trap);
1840 }
1841 }
1842
1843 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
1844 for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
1845 UnstableIfTrap* trap = _unstable_if_traps.at(i);
1846 Node* n = trap->uncommon_trap();
1847 if (!useful.member(n)) {
1848 _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
1849 }
1850 }
1851 }
1852
1853 // Remove the unstable if trap associated with 'unc' from candidates. It is either dead
1854 // or a fold-compares case. Return true on success or if the trap was not found.
1855 //
1856 // In rare cases, the found trap has been processed. It is too late to delete it. Return
2106 assert(has_stringbuilder(), "inconsistent");
2107 for_igvn()->clear();
2108 initial_gvn()->replace_with(&igvn);
2109
2110 inline_string_calls(false);
2111
2112 if (failing()) return;
2113
2114 inline_incrementally_cleanup(igvn);
2115 }
2116
2117 set_inlining_incrementally(false);
2118 }
2119
2120 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2121 // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2122 // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2123 // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
2124 // as if "inlining_incrementally() == true" were set.
2125 assert(inlining_incrementally() == false, "not allowed");
2126 assert(_modified_nodes == NULL, "not allowed");
2127 assert(_late_inlines.length() > 0, "sanity");
2128
2129 while (_late_inlines.length() > 0) {
2130 for_igvn()->clear();
2131 initial_gvn()->replace_with(&igvn);
2132
2133 while (inline_incrementally_one()) {
2134 assert(!failing(), "inconsistent");
2135 }
2136 if (failing()) return;
2137
2138 inline_incrementally_cleanup(igvn);
2139 }
2140 }
2141
2142 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2143 if (_loop_opts_cnt > 0) {
2144 while (major_progress() && (_loop_opts_cnt > 0)) {
2145 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2146 PhaseIdealLoop::optimize(igvn, mode);
2147 _loop_opts_cnt--;
2148 if (failing()) return false;
2149 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2150 }
2151 }
2152 return true;
2153 }
2154
2155 // Remove edges from "root" to each SafePoint at a backward branch.
2156 // They were inserted during parsing (see add_safepoint()) to make
2157 // infinite loops without calls or exceptions visible to root, i.e.,
2158 // useful.
2159 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2263 Compile::TracePhase tp("", &timers[_t_renumberLive]);
2264 initial_gvn()->replace_with(&igvn);
2265 Unique_Node_List* old_worklist = for_igvn();
2266 old_worklist->clear();
2267 Unique_Node_List new_worklist(C->comp_arena());
2268 {
2269 ResourceMark rm;
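// Node renumbering happens in the PhaseRenumberLive constructor; the surviving
// worklist entries are transferred (with new indices) into new_worklist.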
2270 PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2271 }
2272 Unique_Node_List* save_for_igvn = for_igvn();
2273 set_for_igvn(&new_worklist);
2274 igvn = PhaseIterGVN(initial_gvn());
2275 igvn.optimize();
2276 set_for_igvn(old_worklist); // new_worklist is dead beyond this point
2277 }
2278
2279 // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2280 // safepoints
2281 remove_root_to_sfpts_edges(igvn);
2282
2283 // Perform escape analysis
2284 if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2285 if (has_loops()) {
2286 // Cleanup graph (remove dead nodes).
2287 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2288 PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2289 if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2290 if (failing()) return;
2291 }
2292 bool progress;
2293 do {
2294 ConnectionGraph::do_analysis(this, &igvn);
2295
2296 if (failing()) return;
2297
2298 int mcount = macro_count(); // Record number of allocations and locks before IGVN
2299
2300 // Optimize out fields loads from scalar replaceable allocations.
2301 igvn.optimize();
2302 print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2376 print_method(PHASE_ITER_GVN2, 2);
2377
2378 if (failing()) return;
2379
2380 // Loop transforms on the ideal graph. Range Check Elimination,
2381 // peeling, unrolling, etc.
2382 if (!optimize_loops(igvn, LoopOptsDefault)) {
2383 return;
2384 }
2385
2386 if (failing()) return;
2387
2388 C->clear_major_progress(); // ensure that major progress is now clear
2389
2390 process_for_post_loop_opts_igvn(igvn);
2391
2392 #ifdef ASSERT
2393 bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2394 #endif
2395
2396 {
2397 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2398 PhaseMacroExpand mex(igvn);
2399 if (mex.expand_macro_nodes()) {
2400 assert(failing(), "must bail out w/ explicit message");
2401 return;
2402 }
2403 print_method(PHASE_MACRO_EXPANSION, 2);
2404 }
2405
2406 {
2407 TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2408 if (bs->expand_barriers(this, igvn)) {
2409 assert(failing(), "must bail out w/ explicit message");
2410 return;
2411 }
2412 print_method(PHASE_BARRIER_EXPANSION, 2);
2413 }
2414
2415 if (C->max_vector_size() > 0) {
2416 C->optimize_logic_cones(igvn);
2417 igvn.optimize();
2418 }
2419
2420 DEBUG_ONLY( _modified_nodes = NULL; )
2421
2422 assert(igvn._worklist.size() == 0, "not empty");
2423
2424 assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2425
2426 if (_late_inlines.length() > 0) {
2427 // More opportunities to optimize virtual and MH calls.
2428 // Though it may be too late to perform inlining, strength-reducing them to direct calls is still an option.
2429 process_late_inline_calls_no_inline(igvn);
2430 }
2431 } // (End scope of igvn; run destructor if necessary for asserts.)
2432
2433 check_no_dead_use();
2434
2435 process_print_inlining();
2436
2437 // A method with only infinite loops has no edges entering loops from root
2438 {
2439 TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2440 if (final_graph_reshaping()) {
2441 assert(failing(), "must bail out w/ explicit message");
2442 return;
2443 }
2444 }
2445
2446 print_method(PHASE_OPTIMIZE_FINISHED, 2);
2447 DEBUG_ONLY(set_phase_optimize_finished();)
2448 }
2449
2450 #ifdef ASSERT
3035 // Accumulate any precedence edges
3036 if (mem->in(i) != NULL) {
3037 n->add_prec(mem->in(i));
3038 }
3039 }
3040 // Everything above this point has been processed.
3041 done = true;
3042 }
3043 // Eliminate the previous StoreCM
3044 prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3045 assert(mem->outcnt() == 0, "should be dead");
3046 mem->disconnect_inputs(this);
3047 } else {
3048 prev = mem;
3049 }
3050 mem = prev->in(MemNode::Memory);
3051 }
3052 }
3053 }
3054
3055 //------------------------------final_graph_reshaping_impl----------------------
3056 // Implement items 1-5 from final_graph_reshaping below.
3057 void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3058
3059 if ( n->outcnt() == 0 ) return; // dead node
3060 uint nop = n->Opcode();
3061
3062 // Check for 2-input instruction with "last use" on right input.
3063 // Swap to left input. Implements item (2).
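// (Item (2): putting the last use on the left input encourages update-in-place
// two-address instructions and better register usage; see the header comment of
// final_graph_reshaping().)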
3064 if( n->req() == 3 && // two-input instruction
3065 n->in(1)->outcnt() > 1 && // left use is NOT a last use
3066 (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
3067 n->in(2)->outcnt() == 1 &&// right use IS a last use
3068 !n->in(2)->is_Con() ) { // right use is not a constant
3069 // Check for commutative opcode
3070 switch( nop ) {
3071 case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL:
3072 case Op_MaxI: case Op_MaxL: case Op_MaxF: case Op_MaxD:
3073 case Op_MinI: case Op_MinL: case Op_MinF: case Op_MinD:
3074 case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL:
3187 if (n->outcnt() > 1 &&
3188 !n->is_Proj() &&
3189 nop != Op_CreateEx &&
3190 nop != Op_CheckCastPP &&
3191 nop != Op_DecodeN &&
3192 nop != Op_DecodeNKlass &&
3193 !n->is_Mem() &&
3194 !n->is_Phi()) {
3195 Node *x = n->clone();
3196 call->set_req(TypeFunc::Parms, x);
3197 }
3198 }
3199 break;
3200 }
3201
3202 case Op_StoreCM:
3203 {
3204 // Convert OopStore dependence into precedence edge
3205 Node* prec = n->in(MemNode::OopStore);
3206 n->del_req(MemNode::OopStore);
3207 n->add_prec(prec);
3208 eliminate_redundant_card_marks(n);
3209 }
3210
3211 // fall through
3212
3213 case Op_StoreB:
3214 case Op_StoreC:
3215 case Op_StoreI:
3216 case Op_StoreL:
3217 case Op_CompareAndSwapB:
3218 case Op_CompareAndSwapS:
3219 case Op_CompareAndSwapI:
3220 case Op_CompareAndSwapL:
3221 case Op_CompareAndSwapP:
3222 case Op_CompareAndSwapN:
3223 case Op_WeakCompareAndSwapB:
3224 case Op_WeakCompareAndSwapS:
3225 case Op_WeakCompareAndSwapI:
3226 case Op_WeakCompareAndSwapL:
3227 case Op_WeakCompareAndSwapP:
3783 // Replace all nodes that have identical edges to m with m
3784 k->subsume_by(m, this);
3785 }
3786 }
3787 }
3788 break;
3789 }
3790 case Op_CmpUL: {
3791 if (!Matcher::has_match_rule(Op_CmpUL)) {
3792 // No support for unsigned long comparisons
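// Rewrite CmpUL(a, b) as a signed CmpL: sign_bit_mask is 0 or -1 (arithmetic shift
// by 63), so 'andl' equals a when a >= 0 and max_jlong otherwise. Only the first
// operand is clamped; this relies on the second operand being non-negative.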
3793 ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3794 Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3795 Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3796 ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3797 Node* andl = new AndLNode(orl, remove_sign_mask);
3798 Node* cmp = new CmpLNode(andl, n->in(2));
3799 n->subsume_by(cmp, this);
3800 }
3801 break;
3802 }
3803 default:
3804 assert(!n->is_Call(), "");
3805 assert(!n->is_Mem(), "");
3806 assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3807 break;
3808 }
3809 }
3810
3811 //------------------------------final_graph_reshaping_walk---------------------
3812 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
3813 // requires that the walk visits a node's inputs before visiting the node.
3814 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3815 Unique_Node_List sfpt;
3816
3817 frc._visited.set(root->_idx); // first, mark node as visited
3818 uint cnt = root->req();
3819 Node *n = root;
3820 uint i = 0;
3821 while (true) {
3822 if (i < cnt) {
4159 }
4160 }
4161
4162 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4163 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4164 }
4165
4166 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4167 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4168 }
4169
4170 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4171 if (holder->is_initialized()) {
4172 return false;
4173 }
4174 if (holder->is_being_initialized()) {
4175 if (accessing_method->holder() == holder) {
4176 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4177 // <init>, or a static method. In all those cases, an initialization barrier
4178 // for the holder klass has already been passed.
4179 if (accessing_method->is_static_initializer() ||
4180 accessing_method->is_object_initializer() ||
4181 accessing_method->is_static()) {
4182 return false;
4183 }
4184 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4185 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4186 // In case of <init> or a static method, a barrier on the subclass is not enough: the
4187 // child class can become fully initialized while its parent class is still being initialized.
4188 if (accessing_method->is_static_initializer()) {
4189 return false;
4190 }
4191 }
4192 ciMethod* root = method(); // the root method of compilation
4193 if (root != accessing_method) {
4194 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4195 }
4196 }
4197 return true;
4198 }
4199
4200 #ifndef PRODUCT
4201 //------------------------------verify_bidirectional_edges---------------------
4202 // For each input edge to a node (i.e., for each Use-Def edge), verify that
4203 // there is a corresponding Def-Use edge.
4204 void Compile::verify_bidirectional_edges(Unique_Node_List &visited) {
4205 // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4206 uint stack_size = live_nodes() >> 4;
4207 Node_List nstack(MAX2(stack_size, (uint)OptoNodeListSize));
4208 nstack.push(_root);
4224 if (in != NULL && !in->is_top()) {
4225 // Count the Def-Use edges from `in` back to `n`
4226 int cnt = 0;
4227 for (uint idx = 0; idx < in->_outcnt; idx++) {
4228 if (in->_out[idx] == n) {
4229 cnt++;
4230 }
4231 }
4232 assert(cnt > 0, "Failed to find Def-Use edge.");
4233 // Check for duplicate edges
4234 // walk the input array downcounting the input edges to n
4235 for (uint j = 0; j < length; j++) {
4236 if (n->in(j) == in) {
4237 cnt--;
4238 }
4239 }
4240 assert(cnt == 0, "Mismatched edge count.");
4241 } else if (in == NULL) {
4242 assert(i == 0 || i >= n->req() ||
4243 n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4244 (n->is_Unlock() && i == (n->req() - 1)) ||
4245 (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4246 "only region, phi, arraycopy, unlock or membar nodes have null data edges");
4247 } else {
4248 assert(in->is_top(), "sanity");
4249 // Nothing to check.
4250 }
4251 }
4252 }
4253 }
4254
4255 //------------------------------verify_graph_edges---------------------------
4256 // Walk the Graph and verify that there is a one-to-one correspondence
4257 // between Use-Def edges and Def-Use edges in the graph.
4258 void Compile::verify_graph_edges(bool no_dead_code) {
4259 if (VerifyGraphEdges) {
4260 Unique_Node_List visited;
4261
4262 // Call graph walk to check edges
4263 verify_bidirectional_edges(visited);
4264 if (no_dead_code) {
4265 // Now make sure that no visited node is used by an unvisited node.
4266 bool dead_nodes = false;
4360 // (1) subklass is already limited to a subtype of superklass => always ok
4361 // (2) subklass does not overlap with superklass => always fail
4362 // (3) superklass has NO subtypes and we can check with a simple compare.
4363 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk) {
4364 if (StressReflectiveCode) {
4365 return SSC_full_test; // Let caller generate the general case.
4366 }
4367
4368 if (subk->is_java_subtype_of(superk)) {
4369 return SSC_always_true; // (0) and (1) this test cannot fail
4370 }
4371
4372 if (!subk->maybe_java_subtype_of(superk)) {
4373 return SSC_always_false; // (2) true path dead; no dynamic test needed
4374 }
4375
4376 const Type* superelem = superk;
4377 if (superk->isa_aryklassptr()) {
4378 int ignored;
4379 superelem = superk->is_aryklassptr()->base_element_type(ignored);
4380 }
4381
4382 if (superelem->isa_instklassptr()) {
4383 ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4384 if (!ik->has_subklass()) {
4385 if (!ik->is_final()) {
4386 // Add a dependency if there is a chance of a later subclass.
4387 dependencies()->assert_leaf_type(ik);
4388 }
4389 if (!superk->maybe_java_subtype_of(subk)) {
4390 return SSC_always_false;
4391 }
4392 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4393 }
4394 } else {
4395 // A primitive array type has no subtypes.
4396 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4397 }
4398
4399 return SSC_full_test;
4911 const Type* t = igvn.type_or_null(n);
4912 assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4913 if (n->is_Type()) {
4914 t = n->as_Type()->type();
4915 assert(t == t->remove_speculative(), "no more speculative types");
4916 }
4917 // Iterate over outs - endless loops are unreachable from below
4918 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4919 Node *m = n->fast_out(i);
4920 if (not_a_node(m)) {
4921 continue;
4922 }
4923 worklist.push(m);
4924 }
4925 }
4926 igvn.check_no_speculative_types();
4927 #endif
4928 }
4929 }
4930
4931 // Auxiliary methods to support randomized stressing/fuzzing.
4932
4933 int Compile::random() {
4934 _stress_seed = os::next_random(_stress_seed);
4935 return static_cast<int>(_stress_seed);
4936 }
4937
4938 // This method can be called an arbitrary number of times, with the current count
4939 // as the argument. The logic allows selecting a single candidate from the
4940 // running list of candidates as follows:
4941 // int count = 0;
4942 // Cand* selected = null;
4943 // while(cand = cand->next()) {
4944 // if (randomized_select(++count)) {
4945 // selected = cand;
4946 // }
4947 // }
4948 //
4949 // Including count equalizes the chances any candidate is "selected".
4950 // This is useful when we don't have the complete list of candidates to choose
|
36 #include "gc/shared/barrierSet.hpp"
37 #include "gc/shared/c2/barrierSetC2.hpp"
38 #include "jfr/jfrEvents.hpp"
39 #include "jvm_io.h"
40 #include "memory/allocation.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "opto/addnode.hpp"
43 #include "opto/block.hpp"
44 #include "opto/c2compiler.hpp"
45 #include "opto/callGenerator.hpp"
46 #include "opto/callnode.hpp"
47 #include "opto/castnode.hpp"
48 #include "opto/cfgnode.hpp"
49 #include "opto/chaitin.hpp"
50 #include "opto/compile.hpp"
51 #include "opto/connode.hpp"
52 #include "opto/convertnode.hpp"
53 #include "opto/divnode.hpp"
54 #include "opto/escape.hpp"
55 #include "opto/idealGraphPrinter.hpp"
56 #include "opto/inlinetypenode.hpp"
57 #include "opto/loopnode.hpp"
58 #include "opto/machnode.hpp"
59 #include "opto/macro.hpp"
60 #include "opto/matcher.hpp"
61 #include "opto/mathexactnode.hpp"
62 #include "opto/memnode.hpp"
63 #include "opto/mulnode.hpp"
64 #include "opto/narrowptrnode.hpp"
65 #include "opto/node.hpp"
66 #include "opto/opcodes.hpp"
67 #include "opto/output.hpp"
68 #include "opto/parse.hpp"
69 #include "opto/phaseX.hpp"
70 #include "opto/rootnode.hpp"
71 #include "opto/runtime.hpp"
72 #include "opto/stringopts.hpp"
73 #include "opto/type.hpp"
74 #include "opto/vector.hpp"
75 #include "opto/vectornode.hpp"
76 #include "runtime/globals_extension.hpp"
376 // A constant node that has no out-edges and only one in-edge from
377 // root is usually dead. However, sometimes the reshaping walk makes
378 // it reachable again by adding use edges. So, we will NOT count Con nodes
379 // as dead, to be conservative about the dead node count at any
380 // given time.
381 if (!dead->is_Con()) {
382 record_dead_node(dead->_idx);
383 }
384 if (dead->is_macro()) {
385 remove_macro_node(dead);
386 }
387 if (dead->is_expensive()) {
388 remove_expensive_node(dead);
389 }
390 if (dead->Opcode() == Op_Opaque4) {
391 remove_skeleton_predicate_opaq(dead);
392 }
393 if (dead->for_post_loop_opts_igvn()) {
394 remove_from_post_loop_opts_igvn(dead);
395 }
396 if (dead->is_InlineType()) {
397 remove_inline_type(dead);
398 }
399 if (dead->is_Call()) {
400 remove_useless_late_inlines( &_late_inlines, dead);
401 remove_useless_late_inlines( &_string_late_inlines, dead);
402 remove_useless_late_inlines( &_boxing_late_inlines, dead);
403 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
404
405 if (dead->is_CallStaticJava()) {
406 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
407 }
408 }
409 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
410 bs->unregister_potential_barrier_node(dead);
411 }
412
413 // Disconnect all useless nodes by disconnecting those at the boundary.
414 void Compile::disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_List* worklist) {
415 uint next = 0;
416 while (next < useful.size()) {
417 Node *n = useful.at(next++);
418 if (n->is_SafePoint()) {
419 // We're done with a parsing phase. Replaced nodes are not valid
420 // beyond that point.
421 n->as_SafePoint()->delete_replaced_nodes();
422 }
423 // Use raw traversal of out edges since this code removes out edges
424 int max = n->outcnt();
425 for (int j = 0; j < max; ++j) {
426 Node* child = n->raw_out(j);
427 if (!useful.member(child)) {
428 assert(!child->is_top() || child != top(),
429 "If top is cached in Compile object it is in useful list");
430 // Only need to remove this out-edge to the useless node
431 n->raw_del_out(j);
432 --j; // raw_del_out() fills slot j with the last out-edge, so revisit slot j
433 --max; // and shrink the scan bound accordingly
434 }
435 }
436 if (n->outcnt() == 1 && n->has_special_unique_user()) {
437 worklist->push(n->unique_out());
438 }
439 if (n->outcnt() == 0) {
440 worklist->push(n);
441 }
442 }
443
444 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
445 remove_useless_nodes(_predicate_opaqs, useful); // remove useless predicate opaque nodes
446 remove_useless_nodes(_skeleton_predicate_opaqs, useful);
447 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
448 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
449 remove_useless_nodes(_inline_type_nodes, useful); // remove useless inline type nodes
450 #ifdef ASSERT
451 if (_modified_nodes != NULL) {
452 _modified_nodes->remove_useless_nodes(useful.member_set());
453 }
454 #endif
455 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
456 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
457 #ifdef ASSERT
458 if (_modified_nodes != NULL) {
459 _modified_nodes->remove_useless_nodes(useful.member_set());
460 }
461 #endif
462
463 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
464 bs->eliminate_useless_gc_barriers(useful, this);
465 // clean up the late inline lists
466 remove_useless_late_inlines( &_late_inlines, useful);
467 remove_useless_late_inlines( &_string_late_inlines, useful);
468 remove_useless_late_inlines( &_boxing_late_inlines, useful);
469 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
470 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
471 }
472
473 // ============================================================================
474 //------------------------------CompileWrapper---------------------------------
616 _has_reserved_stack_access(target->has_reserved_stack_access()),
617 #ifndef PRODUCT
618 _igv_idx(0),
619 _trace_opto_output(directive->TraceOptoOutputOption),
620 #endif
621 _has_method_handle_invokes(false),
622 _clinit_barrier_on_entry(false),
623 _stress_seed(0),
624 _comp_arena(mtCompiler),
625 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
626 _env(ci_env),
627 _directive(directive),
628 _log(ci_env->log()),
629 _failure_reason(NULL),
630 _intrinsics (comp_arena(), 0, 0, NULL),
631 _macro_nodes (comp_arena(), 8, 0, NULL),
632 _predicate_opaqs (comp_arena(), 8, 0, NULL),
633 _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
634 _expensive_nodes (comp_arena(), 8, 0, NULL),
635 _for_post_loop_igvn(comp_arena(), 8, 0, NULL),
636 _inline_type_nodes (comp_arena(), 8, 0, NULL),
637 _unstable_if_traps (comp_arena(), 8, 0, NULL),
638 _coarsened_locks (comp_arena(), 8, 0, NULL),
639 _congraph(NULL),
640 NOT_PRODUCT(_igv_printer(NULL) COMMA)
641 _dead_node_list(comp_arena()),
642 _dead_node_count(0),
643 _node_arena(mtCompiler),
644 _old_arena(mtCompiler),
645 _mach_constant_base_node(NULL),
646 _Compile_types(mtCompiler),
647 _initial_gvn(NULL),
648 _for_igvn(NULL),
649 _late_inlines(comp_arena(), 2, 0, NULL),
650 _string_late_inlines(comp_arena(), 2, 0, NULL),
651 _boxing_late_inlines(comp_arena(), 2, 0, NULL),
652 _vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
653 _late_inlines_pos(0),
654 _number_of_mh_late_inlines(0),
655 _print_inlining_stream(new (mtCompiler) stringStream()),
656 _print_inlining_list(NULL),
721 // Node list that Iterative GVN will start with
722 Unique_Node_List for_igvn(comp_arena());
723 set_for_igvn(&for_igvn);
724
725 // GVN that will be run immediately on new nodes
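// (The GVN hash table is sized from the bytecode size, roughly four nodes per
// bytecode, with MINIMUM_NODE_HASH as the lower bound.)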
726 uint estimated_size = method()->code_size()*4+64;
727 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
728 PhaseGVN gvn(node_arena(), estimated_size);
729 set_initial_gvn(&gvn);
730
731 print_inlining_init();
732 { // Scope for timing the parser
733 TracePhase tp("parse", &timers[_t_parser]);
734
735 // Put top into the hash table ASAP.
736 initial_gvn()->transform_no_reclaim(top());
737
738 // Set up tf(), start(), and find a CallGenerator.
739 CallGenerator* cg = NULL;
740 if (is_osr_compilation()) {
741 init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
742 StartNode* s = new StartOSRNode(root(), tf()->domain_sig());
743 initial_gvn()->set_type_bottom(s);
744 init_start(s);
745 cg = CallGenerator::for_osr(method(), entry_bci());
746 } else {
747 // Normal case.
748 init_tf(TypeFunc::make(method()));
749 StartNode* s = new StartNode(root(), tf()->domain_cc());
750 initial_gvn()->set_type_bottom(s);
751 init_start(s);
752 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
753 // With java.lang.ref.Reference.get() we must go through the
754 // intrinsic - even when get() is the root
755 // method of the compile - so that, if necessary, the value in
756 // the referent field of the reference object gets recorded by
757 // the pre-barrier code.
758 cg = find_intrinsic(method(), false);
759 }
760 if (cg == NULL) {
761 float past_uses = method()->interpreter_invocation_count();
762 float expected_uses = past_uses;
763 cg = CallGenerator::for_inline(method(), expected_uses);
764 }
765 }
766 if (failing()) return;
767 if (cg == NULL) {
768 record_method_not_compilable("cannot parse method");
769 return;
848 print_ideal_ir("print_ideal");
849 }
850 #endif
851
852 #ifdef ASSERT
853 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
854 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
855 #endif
856
857 // Dump compilation data to replay it.
858 if (directive->DumpReplayOption) {
859 env()->dump_replay_data(_compile_id);
860 }
861 if (directive->DumpInlineOption && (ilt() != NULL)) {
862 env()->dump_inline_data(_compile_id);
863 }
864
865 // Now that we know the size of all the monitors we can add a fixed slot
866 // for the original deopt pc.
867 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
868 if (needs_stack_repair()) {
869 // One extra slot for the special stack increment value
870 next_slot += 2;
871 }
872 // TODO 8284443 Only reserve extra slot if needed
873 if (InlineTypeReturnedAsFields) {
874 // One extra slot to hold the IsInit information for a nullable
875 // inline type return if we run out of registers.
876 next_slot += 2;
877 }
878 set_fixed_slots(next_slot);
879
880 // Compute when to use implicit null checks. Used by matching trap based
881 // nodes and NullCheck optimization.
882 set_allowed_deopt_reasons();
883
884 // Now generate code
885 Code_Gen();
886 }
887
888 //------------------------------Compile----------------------------------------
889 // Compile a runtime stub
890 Compile::Compile( ciEnv* ci_env,
891 TypeFunc_generator generator,
892 address stub_function,
893 const char *stub_name,
894 int is_fancy_jump,
895 bool pass_tls,
896 bool return_pc,
897 DirectiveSet* directive)
1010 // Create Debug Information Recorder to record scopes, oopmaps, etc.
1011 env()->set_oop_recorder(new OopRecorder(env()->arena()));
1012 env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
1013 env()->set_dependencies(new Dependencies(env()));
1014
1015 _fixed_slots = 0;
1016 set_has_split_ifs(false);
1017 set_has_loops(false); // first approximation
1018 set_has_stringbuilder(false);
1019 set_has_boxed_value(false);
1020 _trap_can_recompile = false; // no traps emitted yet
1021 _major_progress = true; // start out assuming good things will happen
1022 set_has_unsafe_access(false);
1023 set_max_vector_size(0);
1024 set_clear_upper_avx(false); // false by default for clearing the upper bits of ymm registers
1025 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1026 set_decompile_count(0);
1027
1028 set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1029 _loop_opts_cnt = LoopOptsCount;
1030 _has_flattened_accesses = false;
1031 _flattened_accesses_share_alias = true;
1032 _scalarize_in_safepoints = false;
1033
1034 set_do_inlining(Inline);
1035 set_max_inline_size(MaxInlineSize);
1036 set_freq_inline_size(FreqInlineSize);
1037 set_do_scheduling(OptoScheduling);
1038
1039 set_do_vector_loop(false);
1040 set_has_monitors(false);
1041
1042 if (AllowVectorizeOnDemand) {
1043 if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
1044 set_do_vector_loop(true);
1045 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1046 } else if (has_method() && method()->name() != 0 &&
1047 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1048 set_do_vector_loop(true);
1049 }
1050 }
1051 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider having do_vector_loop() mandate use_cmove unconditionally
1052 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1053
1308 // If this method has already thrown a range-check,
1309 // assume it was because we already tried range smearing
1310 // and it failed.
1311 uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1312 return !already_trapped;
1313 }
1314
1315
1316 //------------------------------flatten_alias_type-----------------------------
1317 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1318 assert(do_aliasing(), "Aliasing should be enabled");
1319 int offset = tj->offset();
1320 TypePtr::PTR ptr = tj->ptr();
1321
1322 // A known instance (scalarizable allocation) aliases only with itself.
1323 bool is_known_inst = tj->isa_oopptr() != NULL &&
1324 tj->is_oopptr()->is_known_instance();
1325
1326 // Process weird unsafe references.
1327 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1328 bool default_value_load = EnableValhalla && tj->is_instptr()->instance_klass() == ciEnv::current()->Class_klass();
1329 assert(InlineUnsafeOps || StressReflectiveCode || default_value_load, "indeterminate pointers come only from unsafe ops");
1330 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1331 tj = TypeOopPtr::BOTTOM;
1332 ptr = tj->ptr();
1333 offset = tj->offset();
1334 }
1335
1336 // Array pointers need some flattening
1337 const TypeAryPtr* ta = tj->isa_aryptr();
1338 if (ta && ta->is_stable()) {
1339 // Erase stability property for alias analysis.
1340 tj = ta = ta->cast_to_stable(false);
1341 }
1342 if (ta && ta->is_not_flat()) {
1343 // Erase not flat property for alias analysis.
1344 tj = ta = ta->cast_to_not_flat(false);
1345 }
1346 if (ta && ta->is_not_null_free()) {
1347 // Erase not null free property for alias analysis.
1348 tj = ta = ta->cast_to_not_null_free(false);
1349 }
1350
1351 if( ta && is_known_inst ) {
1352 if ( offset != Type::OffsetBot &&
1353 offset > arrayOopDesc::length_offset_in_bytes() ) {
1354 offset = Type::OffsetBot; // Flatten constant access into array body only
1355 tj = ta = ta->
1356 remove_speculative()->
1357 cast_to_ptr_type(ptr)->
1358 with_offset(offset);
1359 }
1360 } else if (ta) {
1361 // For arrays indexed by constant indices, we flatten the alias
1362 // space to include all of the array body. Only the header, klass
1363 // and array length can be accessed un-aliased.
1364 // For a flattened inline type array, each field has its own slice, so
1365 // we must include the field offset.
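      // For illustration: a load at a constant index such as a[2] is given offset
      // OffsetBot below and thus aliases with the whole array body, while a load of
      // a.length stays on the distinct TypeAryPtr::RANGE slice and the mark/klass
      // words map to their own header slices.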
1366 if( offset != Type::OffsetBot ) {
1367 if( ta->const_oop() ) { // MethodData* or Method*
1368 offset = Type::OffsetBot; // Flatten constant access into array body
1369 tj = ta = ta->
1370 remove_speculative()->
1371 cast_to_ptr_type(ptr)->
1372 cast_to_exactness(false)->
1373 with_offset(offset);
1374 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1375 // range is OK as-is.
1376 tj = ta = TypeAryPtr::RANGE;
1377 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1378 tj = TypeInstPtr::KLASS; // all klass loads look alike
1379 ta = TypeAryPtr::RANGE; // generic ignored junk
1380 ptr = TypePtr::BotPTR;
1381 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1382 tj = TypeInstPtr::MARK;
1383 ta = TypeAryPtr::RANGE; // generic ignored junk
1384 ptr = TypePtr::BotPTR;
1385 } else { // Random constant offset into array body
1386 offset = Type::OffsetBot; // Flatten constant access into array body
1387 tj = ta = ta->
1388 remove_speculative()->
1389 cast_to_ptr_type(ptr)->
1390 cast_to_exactness(false)->
1391 with_offset(offset);
1392 }
1393 }
1394 // Arrays of fixed size alias with arrays of unknown size.
1395 if (ta->size() != TypeInt::POS) {
1396 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1397 tj = ta = ta->
1398 remove_speculative()->
1399 cast_to_ptr_type(ptr)->
1400 with_ary(tary)->
1401 cast_to_exactness(false);
1402 }
1403 // Arrays of known objects become arrays of unknown objects.
1404 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1405 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1406 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1407 }
1408 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1409 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1410 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1411 }
1412 // Initially all flattened array accesses share a single slice
1413 if (ta->is_flat() && ta->elem() != TypeInlineType::BOTTOM && _flattened_accesses_share_alias) {
1414 const TypeAry *tary = TypeAry::make(TypeInlineType::BOTTOM, ta->size());
1415 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
1416 }
1417 // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1418 // cannot be distinguished by bytecode alone.
1419 if (ta->elem() == TypeInt::BOOL) {
1420 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1421 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1422 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
1423 }
1424 // During the 2nd round of IterGVN, NotNull castings are removed.
1425 // Make sure the Bottom and NotNull variants alias the same.
1426 // Also, make sure exact and non-exact variants alias the same.
1427 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1428 tj = ta = ta->
1429 remove_speculative()->
1430 cast_to_ptr_type(TypePtr::BotPTR)->
1431 cast_to_exactness(false)->
1432 with_offset(offset);
1433 }
1434 }
1435
1436 // Oop pointers need some flattening
1437 const TypeInstPtr *to = tj->isa_instptr();
1438 if (to && to != TypeOopPtr::BOTTOM) {
1439 ciInstanceKlass* ik = to->instance_klass();
1440 if( ptr == TypePtr::Constant ) {
1441 if (ik != ciEnv::current()->Class_klass() ||
1442 offset < ik->layout_helper_size_in_bytes()) {
1452 } else if( is_known_inst ) {
1453 tj = to; // Keep NotNull and klass_is_exact for instance type
1454 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1455 // During the 2nd round of IterGVN, NotNull castings are removed.
1456 // Make sure the Bottom and NotNull variants alias the same.
1457 // Also, make sure exact and non-exact variants alias the same.
1458 tj = to = to->
1459 remove_speculative()->
1460 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1461 cast_to_ptr_type(TypePtr::BotPTR)->
1462 cast_to_exactness(false);
1463 }
1464 if (to->speculative() != NULL) {
1465 tj = to = to->remove_speculative();
1466 }
1467 // Canonicalize the holder of this field
1468 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1469 // First handle header references such as a LoadKlassNode, even if the
1470 // object's klass is unloaded at compile time (4965979).
1471 if (!is_known_inst) { // Do it only for non-instance types
1472 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
1473 }
1474 } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1475 // Static fields are in the space above the normal instance
1476 // fields in the java.lang.Class instance.
1477 if (ik != ciEnv::current()->Class_klass()) {
1478 to = NULL;
1479 tj = TypeOopPtr::BOTTOM;
1480 offset = tj->offset();
1481 }
1482 } else {
1483 ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1484 assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1485 if (!ik->equals(canonical_holder) || tj->offset() != offset) {
1486 if( is_known_inst ) {
1487 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), canonical_holder->flatten_array(), to->instance_id());
1488 } else {
1489 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
1490 }
1491 }
1492 }
1493 }
1494
1495 // Klass pointers to object array klasses need some flattening
1496 const TypeKlassPtr *tk = tj->isa_klassptr();
1497 if( tk ) {
1498 // If we are referencing a field within a Klass, we need
1499 // to assume the worst case of an Object. Both exact and
1500 // inexact types must flatten to the same alias class so
1501 // use NotNull as the PTR.
1502 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1503 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1504 env()->Object_klass(),
1505 Type::Offset(offset));
1506 }
1507
1508 if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1509 ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1510 if (!k || !k->is_loaded()) { // Only fails for some -Xcomp runs
1511 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), Type::Offset(offset));
1512 } else {
1513 tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, Type::Offset(offset), tk->is_not_flat(), tk->is_not_null_free(), tk->is_null_free());
1514 }
1515 }
1516
1517 // Check for precise loads from the primary supertype array and force them
1518 // to the supertype cache alias index. Check for generic array loads from
1519 // the primary supertype array and also force them to the supertype cache
1520 // alias index. Since the same load can reach both, we need to merge
1521 // these 2 disparate memories into the same alias class. Since the
1522 // primary supertype array is read-only, there's no chance of confusion
1523 // where we bypass an array load and an array store.
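    // For illustration: a constant-offset load from Klass::_primary_supers and a load at
    // an unknown offset into the same Klass both get rewritten below to the
    // secondary_super_cache offset, so the two can never be treated as independent memory.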
1524 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1525 if (offset == Type::OffsetBot ||
1526 (offset >= primary_supers_offset &&
1527 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1528 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1529 offset = in_bytes(Klass::secondary_super_cache_offset());
1530 tj = tk = tk->with_offset(offset);
1531 }
1532 }
1533
1626 intptr_t key = (intptr_t) adr_type;
1627 key ^= key >> logAliasCacheSize;
1628 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1629 }
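// Informal sketch of the probe above (illustrated with logAliasCacheSize == 6, i.e. a
// 64-entry direct-mapped cache): the computation is index = (key ^ (key >> 6)) & 63,
// so the high bits of the TypePtr address perturb the low bits before they select
// the cache entry.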
1630
1631
1632 //-----------------------------grow_alias_types--------------------------------
1633 void Compile::grow_alias_types() {
1634 const int old_ats = _max_alias_types; // how many before?
1635 const int new_ats = old_ats; // how many more?
1636 const int grow_ats = old_ats+new_ats; // how many now?
1637 _max_alias_types = grow_ats;
1638 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1639 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1640 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1641 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1642 }
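// For example, if _max_alias_types is currently 128, a grow reallocates the pointer
// array to 256 slots and appends 128 freshly zeroed AliasType objects. Previously
// handed-out AliasType* values stay valid because only the pointer array is moved.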
1643
1644
1645 //--------------------------------find_alias_type------------------------------
1646 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1647 if (!do_aliasing()) {
1648 return alias_type(AliasIdxBot);
1649 }
1650
1651 AliasCacheEntry* ace = NULL;
1652 if (!uncached) {
1653 ace = probe_alias_cache(adr_type);
1654 if (ace->_adr_type == adr_type) {
1655 return alias_type(ace->_index);
1656 }
1657 }
1658
1659 // Handle special cases.
1660 if (adr_type == NULL) return alias_type(AliasIdxTop);
1661 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1662
1663 // Do it the slow way.
1664 const TypePtr* flat = flatten_alias_type(adr_type);
1665
1666 #ifdef ASSERT
1667 {
1668 ResourceMark rm;
1669 assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1670 Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1671 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1672 Type::str(adr_type));
1673 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1674 const TypeOopPtr* foop = flat->is_oopptr();
1675 // Scalarizable allocations have exact klass always.
1676 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1686 if (alias_type(i)->adr_type() == flat) {
1687 idx = i;
1688 break;
1689 }
1690 }
1691
1692 if (idx == AliasIdxTop) {
1693 if (no_create) return NULL;
1694 // Grow the array if necessary.
1695 if (_num_alias_types == _max_alias_types) grow_alias_types();
1696 // Add a new alias type.
1697 idx = _num_alias_types++;
1698 _alias_types[idx]->Init(idx, flat);
1699 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1700 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1701 if (flat->isa_instptr()) {
1702 if (flat->offset() == java_lang_Class::klass_offset()
1703 && flat->is_instptr()->instance_klass() == env()->Class_klass())
1704 alias_type(idx)->set_rewritable(false);
1705 }
1706 ciField* field = NULL;
1707 if (flat->isa_aryptr()) {
1708 #ifdef ASSERT
1709 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1710 // (T_BYTE has the weakest alignment and size restrictions...)
1711 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1712 #endif
1713 const Type* elemtype = flat->is_aryptr()->elem();
1714 if (flat->offset() == TypePtr::OffsetBot) {
1715 alias_type(idx)->set_element(elemtype);
1716 }
1717 int field_offset = flat->is_aryptr()->field_offset().get();
1718 if (elemtype->isa_inlinetype() &&
1719 field_offset != Type::OffsetBot) {
1720 ciInlineKlass* vk = elemtype->inline_klass();
1721 field_offset += vk->first_field_offset();
1722 field = vk->get_field_by_offset(field_offset, false);
1723 }
1724 }
1725 if (flat->isa_klassptr()) {
1726 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1727 alias_type(idx)->set_rewritable(false);
1728 if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1729 alias_type(idx)->set_rewritable(false);
1730 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1731 alias_type(idx)->set_rewritable(false);
1732 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1733 alias_type(idx)->set_rewritable(false);
1734 if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1735 alias_type(idx)->set_rewritable(false);
1736 if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1737 alias_type(idx)->set_rewritable(false);
1738 }
1739 // %%% (We would like to finalize JavaThread::threadObj_offset(),
1740 // but the base pointer type is not distinctive enough to identify
1741 // references into JavaThread.)
1742
1743 // Check for final fields.
1744 const TypeInstPtr* tinst = flat->isa_instptr();
1745 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1746 if (tinst->const_oop() != NULL &&
1747 tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1748 tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1749 // static field
1750 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1751 field = k->get_field_by_offset(tinst->offset(), true);
1752 } else if (tinst->is_inlinetypeptr()) {
1753 // Inline type field
1754 ciInlineKlass* vk = tinst->inline_klass();
1755 field = vk->get_field_by_offset(tinst->offset(), false);
1756 } else {
1757 ciInstanceKlass *k = tinst->instance_klass();
1758 field = k->get_field_by_offset(tinst->offset(), false);
1759 }
1760 }
1761 assert(field == NULL ||
1762 original_field == NULL ||
1763 (field->holder() == original_field->holder() &&
1764 field->offset() == original_field->offset() &&
1765 field->is_static() == original_field->is_static()), "wrong field?");
1766 // Set field() and is_rewritable() attributes.
1767 if (field != NULL) {
1768 alias_type(idx)->set_field(field);
1769 if (flat->isa_aryptr()) {
1770 // Fields of flat arrays are rewritable although they are declared final
1771 assert(flat->is_aryptr()->is_flat(), "must be a flat array");
1772 alias_type(idx)->set_rewritable(true);
1773 }
1774 }
1775 }
1776
1777 // Fill the cache for next time.
1778 if (!uncached) {
1779 ace->_adr_type = adr_type;
1780 ace->_index = idx;
1781 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1782
1783 // Might as well try to fill the cache for the flattened version, too.
1784 AliasCacheEntry* face = probe_alias_cache(flat);
1785 if (face->_adr_type == NULL) {
1786 face->_adr_type = flat;
1787 face->_index = idx;
1788 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1789 }
1790 }
1791
1792 return alias_type(idx);
1793 }
1794
1795
1796 Compile::AliasType* Compile::alias_type(ciField* field) {
1797 const TypeOopPtr* t;
1798 if (field->is_static())
1799 t = TypeInstPtr::make(field->holder()->java_mirror());
1800 else
1801 t = TypeOopPtr::make_from_klass_raw(field->holder());
1802 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1803 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1804 return atp;
1805 }
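// Usage note (informal): callers obtain a field's AliasType through this helper and
// consult is_rewritable(); per the assert above it is false exactly for final or
// stable fields, which downstream memory optimizations use when deciding whether a
// slice may be modified.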
1806
1807
1808 //------------------------------have_alias_type--------------------------------
1809 bool Compile::have_alias_type(const TypePtr* adr_type) {
1886 C->set_post_loop_opts_phase(); // no more loop opts allowed
1887
1888 assert(!C->major_progress(), "not cleared");
1889
1890 if (_for_post_loop_igvn.length() > 0) {
1891 while (_for_post_loop_igvn.length() > 0) {
1892 Node* n = _for_post_loop_igvn.pop();
1893 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1894 igvn._worklist.push(n);
1895 }
1896 igvn.optimize();
1897 assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1898
1899 // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1900 if (C->major_progress()) {
1901 C->clear_major_progress(); // ensure that major progress is now clear
1902 }
1903 }
1904 }
1905
1906 void Compile::add_inline_type(Node* n) {
1907 assert(n->is_InlineType(), "unexpected node");
1908 _inline_type_nodes.push(n);
1909 }
1910
1911 void Compile::remove_inline_type(Node* n) {
1912 assert(n->is_InlineType(), "unexpected node");
1913 if (_inline_type_nodes.contains(n)) {
1914 _inline_type_nodes.remove(n);
1915 }
1916 }
1917
1918 // Does the return value keep otherwise useless inline type allocations alive?
1919 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1920 ResourceMark rm;
1921 Unique_Node_List wq;
1922 wq.push(ret_val);
1923 bool some_allocations = false;
1924 for (uint i = 0; i < wq.size(); i++) {
1925 Node* n = wq.at(i);
1926 assert(n == ret_val || !n->is_InlineType(), "chain of inline type nodes");
1927 if (n->outcnt() > 1) {
1928 // Some other use for the allocation
1929 return false;
1930 } else if (n->is_InlineType()) {
1931 wq.push(n->in(1));
1932 } else if (n->is_Phi()) {
1933 for (uint j = 1; j < n->req(); j++) {
1934 wq.push(n->in(j));
1935 }
1936 } else if (n->is_CheckCastPP() &&
1937 n->in(1)->is_Proj() &&
1938 n->in(1)->in(0)->is_Allocate()) {
1939 some_allocations = true;
1940 } else if (n->is_CheckCastPP()) {
1941 wq.push(n->in(1));
1942 }
1943 }
1944 return some_allocations;
1945 }
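// For illustration: a chain Return <- InlineType <- CheckCastPP <- Proj <- Allocate,
// where each node has a single use, makes this function answer true; the caller below
// then swaps the return value for a tagged klass pointer so the now unused allocation
// can go dead and be removed.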
1946
1947 void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
1948 // Make sure that the return value does not keep an otherwise unused allocation alive
1949 if (tf()->returns_inline_type_as_fields()) {
1950 Node* ret = NULL;
1951 for (uint i = 1; i < root()->req(); i++) {
1952 Node* in = root()->in(i);
1953 if (in->Opcode() == Op_Return) {
1954 assert(ret == NULL, "only one return");
1955 ret = in;
1956 }
1957 }
1958 if (ret != NULL) {
1959 Node* ret_val = ret->in(TypeFunc::Parms);
1960 if (igvn.type(ret_val)->isa_oopptr() &&
1961 return_val_keeps_allocations_alive(ret_val)) {
1962 igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
1963 assert(ret_val->outcnt() == 0, "should be dead now");
1964 igvn.remove_dead_node(ret_val);
1965 }
1966 }
1967 }
1968 if (_inline_type_nodes.length() == 0) {
1969 return;
1970 }
1971 // Scalarize inline types in safepoint debug info.
1972 // Delay this until all inlining is over to avoid getting inconsistent debug info.
1973 set_scalarize_in_safepoints(true);
1974 for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
1975 _inline_type_nodes.at(i)->as_InlineType()->make_scalar_in_safepoints(&igvn);
1976 }
1977 if (remove) {
1978 // Remove inline type nodes by replacing them with their oop input
1979 while (_inline_type_nodes.length() > 0) {
1980 InlineTypeNode* vt = _inline_type_nodes.pop()->as_InlineType();
1981 if (vt->outcnt() == 0) {
1982 igvn.remove_dead_node(vt);
1983 continue;
1984 }
1985 for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
1986 DEBUG_ONLY(bool must_be_buffered = false);
1987 Node* u = vt->out(i);
1988 // Check if any users are blackholes. If so, rewrite them to use either the
1989 // allocated buffer, or individual components, instead of the inline type node
1990 // that goes away.
1991 if (u->is_Blackhole()) {
1992 BlackholeNode* bh = u->as_Blackhole();
1993
1994 // Unlink the old input
1995 int idx = bh->find_edge(vt);
1996 assert(idx != -1, "The edge should be there");
1997 bh->del_req(idx);
1998 --i;
1999
2000 if (vt->is_allocated(&igvn)) {
2001 // Already has the allocated instance, blackhole that
2002 bh->add_req(vt->get_oop());
2003 } else {
2004 // Not allocated yet, blackhole the components
2005 for (uint c = 0; c < vt->field_count(); c++) {
2006 bh->add_req(vt->field_value(c));
2007 }
2008 }
2009
2010 // Node modified, record for IGVN
2011 igvn.record_for_igvn(bh);
2012 }
2013 #ifdef ASSERT
2014 // Verify that inline type is buffered when replacing by oop
2015 else if (u->is_InlineType()) {
2016 InlineTypeNode* vt2 = u->as_InlineType();
2017 for (uint i = 0; i < vt2->field_count(); ++i) {
2018 if (vt2->field_value(i) == vt && !vt2->field_is_flattened(i)) {
2019 // Use in non-flat field
2020 must_be_buffered = true;
2021 }
2022 }
2023 } else if (u->Opcode() != Op_Return || !tf()->returns_inline_type_as_fields()) {
2024 must_be_buffered = true;
2025 }
2026 if (must_be_buffered && !vt->is_allocated(&igvn)) {
2027 vt->dump(-3);
2028 assert(false, "Should have been buffered");
2029 }
2030 #endif
2031 }
2032 igvn.replace_node(vt, vt->get_oop());
2033 }
2034 }
2035 igvn.optimize();
2036 }
2037
2038 void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
2039 if (!_has_flattened_accesses) {
2040 return;
2041 }
2042 // Initially, all flattened array accesses share the same slice to
2043 // keep dependencies with Object[] array accesses (that could be
2044 // to a flattened array) correct. We're done with parsing so we
2045 // now know all flattened array accesses in this compile
2046 // unit. Let's move flattened array accesses to their own slice,
2047 // one per element field. This should help memory access
2048 // optimizations.
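  // For illustration (hypothetical inline class with fields MyValue.x and MyValue.y):
  // during parsing both flattened accesses sit on the shared TypeAryPtr::INLINES slice;
  // after this pass each access is rewired to a per-field alias index, so independent
  // loads and stores of x and y no longer appear to interfere.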
2049 ResourceMark rm;
2050 Unique_Node_List wq;
2051 wq.push(root());
2052
2053 Node_List mergememnodes;
2054 Node_List memnodes;
2055
2056 // Alias index currently shared by all flattened memory accesses
2057 int index = get_alias_index(TypeAryPtr::INLINES);
2058
2059 // Find MergeMem nodes and flattened array accesses
2060 for (uint i = 0; i < wq.size(); i++) {
2061 Node* n = wq.at(i);
2062 if (n->is_Mem()) {
2063 const TypePtr* adr_type = NULL;
2064 if (n->Opcode() == Op_StoreCM) {
2065 adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
2066 } else {
2067 adr_type = get_adr_type(get_alias_index(n->adr_type()));
2068 }
2069 if (adr_type == TypeAryPtr::INLINES) {
2070 memnodes.push(n);
2071 }
2072 } else if (n->is_MergeMem()) {
2073 MergeMemNode* mm = n->as_MergeMem();
2074 if (mm->memory_at(index) != mm->base_memory()) {
2075 mergememnodes.push(n);
2076 }
2077 }
2078 for (uint j = 0; j < n->req(); j++) {
2079 Node* m = n->in(j);
2080 if (m != NULL) {
2081 wq.push(m);
2082 }
2083 }
2084 }
2085
2086 if (memnodes.size() > 0) {
2087 _flattened_accesses_share_alias = false;
2088
2089 // We are going to change the slice for the flattened array
2090 // accesses so we need to clear the cache entries that refer to
2091 // them.
2092 for (uint i = 0; i < AliasCacheSize; i++) {
2093 AliasCacheEntry* ace = &_alias_cache[i];
2094 if (ace->_adr_type != NULL &&
2095 ace->_adr_type->isa_aryptr() &&
2096 ace->_adr_type->is_aryptr()->is_flat()) {
2097 ace->_adr_type = NULL;
2098 ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the NULL adr_type resolves to AliasIdxTop
2099 }
2100 }
2101
2102 // Find what aliases we are going to add
2103 int start_alias = num_alias_types()-1;
2104 int stop_alias = 0;
2105
2106 for (uint i = 0; i < memnodes.size(); i++) {
2107 Node* m = memnodes.at(i);
2108 const TypePtr* adr_type = NULL;
2109 if (m->Opcode() == Op_StoreCM) {
2110 adr_type = m->in(MemNode::OopStore)->adr_type();
2111 if (adr_type != TypeAryPtr::INLINES) {
2112 // store was optimized out and we lost track of the adr_type
2113 Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
2114 m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
2115 get_alias_index(adr_type));
2116 igvn.register_new_node_with_optimizer(clone);
2117 igvn.replace_node(m, clone);
2118 }
2119 } else {
2120 adr_type = m->adr_type();
2121 #ifdef ASSERT
2122 m->as_Mem()->set_adr_type(adr_type);
2123 #endif
2124 }
2125 int idx = get_alias_index(adr_type);
2126 start_alias = MIN2(start_alias, idx);
2127 stop_alias = MAX2(stop_alias, idx);
2128 }
2129
2130 assert(stop_alias >= start_alias, "should have expanded aliases");
2131
2132 Node_Stack stack(0);
2133 #ifdef ASSERT
2134 VectorSet seen(Thread::current()->resource_area());
2135 #endif
2136 // Now let's fix the memory graph so each flattened array access
2137 // is moved to the right slice. Start from the MergeMem nodes.
2138 uint last = unique();
2139 for (uint i = 0; i < mergememnodes.size(); i++) {
2140 MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
2141 Node* n = current->memory_at(index);
2142 MergeMemNode* mm = NULL;
2143 do {
2144 // Follow memory edges through memory accesses, phis and
2145 // narrow membars and push nodes on the stack. Once we hit
2146 // bottom memory, we pop elements off the stack one at a
2147 // time, in reverse order, and move them to the right slice
2148 // by changing their memory edges.
2149 if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
2150 assert(!seen.test_set(n->_idx), "");
2151 // Uses (a load for instance) will need to be moved to the
2152 // right slice as well and will get a new memory state
2153 // that we don't know yet. The use could also be the
2154 // backedge of a loop. We put a place holder node between
2155 // the memory node and its uses. We replace that place
2156 // holder with the correct memory state once we know it,
2157 // i.e. when nodes are popped off the stack. Using the
2158 // place holder makes the logic work in the presence of
2159 // loops.
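          // In other words (informal): a use visited before its final memory state is
          // known temporarily points at a one-input placeholder Node; once the stack
          // unwinds and the per-slice memory is available, the placeholder is replaced
          // with a clone of the MergeMem built below (see the Op_Node handling there).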
2160 if (n->outcnt() > 1) {
2161 Node* place_holder = NULL;
2162 assert(!n->has_out_with(Op_Node), "");
2163 for (DUIterator k = n->outs(); n->has_out(k); k++) {
2164 Node* u = n->out(k);
2165 if (u != current && u->_idx < last) {
2166 bool success = false;
2167 for (uint l = 0; l < u->req(); l++) {
2168 if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
2169 continue;
2170 }
2171 Node* in = u->in(l);
2172 if (in == n) {
2173 if (place_holder == NULL) {
2174 place_holder = new Node(1);
2175 place_holder->init_req(0, n);
2176 }
2177 igvn.replace_input_of(u, l, place_holder);
2178 success = true;
2179 }
2180 }
2181 if (success) {
2182 --k;
2183 }
2184 }
2185 }
2186 }
2187 if (n->is_Phi()) {
2188 stack.push(n, 1);
2189 n = n->in(1);
2190 } else if (n->is_Mem()) {
2191 stack.push(n, n->req());
2192 n = n->in(MemNode::Memory);
2193 } else {
2194 assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
2195 stack.push(n, n->req());
2196 n = n->in(0)->in(TypeFunc::Memory);
2197 }
2198 } else {
2199 assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
2200 // Build a new MergeMem node to carry the new memory state
2201 // as we build it. IGVN should fold extraneous MergeMem
2202 // nodes.
2203 mm = MergeMemNode::make(n);
2204 igvn.register_new_node_with_optimizer(mm);
2205 while (stack.size() > 0) {
2206 Node* m = stack.node();
2207 uint idx = stack.index();
2208 if (m->is_Mem()) {
2209 // Move memory node to its new slice
2210 const TypePtr* adr_type = m->adr_type();
2211 int alias = get_alias_index(adr_type);
2212 Node* prev = mm->memory_at(alias);
2213 igvn.replace_input_of(m, MemNode::Memory, prev);
2214 mm->set_memory_at(alias, m);
2215 } else if (m->is_Phi()) {
2216 // We need as many new phis as there are new aliases
2217 igvn.replace_input_of(m, idx, mm);
2218 if (idx == m->req()-1) {
2219 Node* r = m->in(0);
2220 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2221 const Type* adr_type = get_adr_type(j);
2222 if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
2223 continue;
2224 }
2225 Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
2226 igvn.register_new_node_with_optimizer(phi);
2227 for (uint k = 1; k < m->req(); k++) {
2228 phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
2229 }
2230 mm->set_memory_at(j, phi);
2231 }
2232 Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2233 igvn.register_new_node_with_optimizer(base_phi);
2234 for (uint k = 1; k < m->req(); k++) {
2235 base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
2236 }
2237 mm->set_base_memory(base_phi);
2238 }
2239 } else {
2240 // This is a MemBarCPUOrder node from
2241 // Parse::array_load()/Parse::array_store(), in the
2242 // branch that handles flattened arrays hidden under
2243 // an Object[] array. We also need one new membar per
2244 // new alias to keep the unknown access that the
2245 // membars protect properly ordered with accesses to
2246 // known flattened arrays.
2247 assert(m->is_Proj(), "projection expected");
2248 Node* ctrl = m->in(0)->in(TypeFunc::Control);
2249 igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
2250 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2251 const Type* adr_type = get_adr_type(j);
2252 if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
2253 continue;
2254 }
2255 MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
2256 igvn.register_new_node_with_optimizer(mb);
2257 Node* mem = mm->memory_at(j);
2258 mb->init_req(TypeFunc::Control, ctrl);
2259 mb->init_req(TypeFunc::Memory, mem);
2260 ctrl = new ProjNode(mb, TypeFunc::Control);
2261 igvn.register_new_node_with_optimizer(ctrl);
2262 mem = new ProjNode(mb, TypeFunc::Memory);
2263 igvn.register_new_node_with_optimizer(mem);
2264 mm->set_memory_at(j, mem);
2265 }
2266 igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
2267 }
2268 if (idx < m->req()-1) {
2269 idx += 1;
2270 stack.set_index(idx);
2271 n = m->in(idx);
2272 break;
2273 }
2274 // Take care of place holder nodes
2275 if (m->has_out_with(Op_Node)) {
2276 Node* place_holder = m->find_out_with(Op_Node);
2277 if (place_holder != NULL) {
2278 Node* mm_clone = mm->clone();
2279 igvn.register_new_node_with_optimizer(mm_clone);
2280 Node* hook = new Node(1);
2281 hook->init_req(0, mm);
2282 igvn.replace_node(place_holder, mm_clone);
2283 hook->destruct(&igvn);
2284 }
2285 assert(!m->has_out_with(Op_Node), "place holder should be gone now");
2286 }
2287 stack.pop();
2288 }
2289 }
2290 } while(stack.size() > 0);
2291 // Fix the memory state at the MergeMem we started from
2292 igvn.rehash_node_delayed(current);
2293 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2294 const Type* adr_type = get_adr_type(j);
2295 if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
2296 continue;
2297 }
2298 current->set_memory_at(j, mm);
2299 }
2300 current->set_memory_at(index, current->base_memory());
2301 }
2302 igvn.optimize();
2303 }
2304 print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
2305 #ifdef ASSERT
2306 if (!_flattened_accesses_share_alias) {
2307 wq.clear();
2308 wq.push(root());
2309 for (uint i = 0; i < wq.size(); i++) {
2310 Node* n = wq.at(i);
2311 assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
2312 for (uint j = 0; j < n->req(); j++) {
2313 Node* m = n->in(j);
2314 if (m != NULL) {
2315 wq.push(m);
2316 }
2317 }
2318 }
2319 }
2320 #endif
2321 }
2322
2323 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
2324 if (OptimizeUnstableIf) {
2325 _unstable_if_traps.append(trap);
2326 }
2327 }
2328
2329 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
2330 for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
2331 UnstableIfTrap* trap = _unstable_if_traps.at(i);
2332 Node* n = trap->uncommon_trap();
2333 if (!useful.member(n)) {
2334 _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
2335 }
2336 }
2337 }
2338
2339 // Remove the unstable if trap associated with 'unc' from candidates. It is either dead
2340 // or the fold-compares case. Return true on success or if the trap is not found.
2341 //
2342 // In rare cases, the found trap has been processed. It is too late to delete it. Return
2592 assert(has_stringbuilder(), "inconsistent");
2593 for_igvn()->clear();
2594 initial_gvn()->replace_with(&igvn);
2595
2596 inline_string_calls(false);
2597
2598 if (failing()) return;
2599
2600 inline_incrementally_cleanup(igvn);
2601 }
2602
2603 set_inlining_incrementally(false);
2604 }
2605
2606 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2607 // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2608 // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2609 // Tracking and verification of modified nodes is disabled by setting _modified_nodes to NULL,
2610 // just as if "inlining_incrementally() == true" were set.
2611 assert(inlining_incrementally() == false, "not allowed");
2612 #ifdef ASSERT
2613 Unique_Node_List* modified_nodes = _modified_nodes;
2614 _modified_nodes = NULL;
2615 #endif
2616 assert(_late_inlines.length() > 0, "sanity");
2617
2618 while (_late_inlines.length() > 0) {
2619 for_igvn()->clear();
2620 initial_gvn()->replace_with(&igvn);
2621
2622 while (inline_incrementally_one()) {
2623 assert(!failing(), "inconsistent");
2624 }
2625 if (failing()) return;
2626
2627 inline_incrementally_cleanup(igvn);
2628 }
2629 DEBUG_ONLY( _modified_nodes = modified_nodes; )
2630 }
2631
2632 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2633 if (_loop_opts_cnt > 0) {
2634 while (major_progress() && (_loop_opts_cnt > 0)) {
2635 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2636 PhaseIdealLoop::optimize(igvn, mode);
2637 _loop_opts_cnt--;
2638 if (failing()) return false;
2639 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2640 }
2641 }
2642 return true;
2643 }
2644
2645 // Remove edges from "root" to each SafePoint at a backward branch.
2646 // They were inserted during parsing (see add_safepoint()) to make
2647 // infinite loops without calls or exceptions visible to root, i.e.,
2648 // useful.
2649 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2753 Compile::TracePhase tp("", &timers[_t_renumberLive]);
2754 initial_gvn()->replace_with(&igvn);
2755 Unique_Node_List* old_worklist = for_igvn();
2756 old_worklist->clear();
2757 Unique_Node_List new_worklist(C->comp_arena());
2758 {
2759 ResourceMark rm;
2760 PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2761 }
2762 Unique_Node_List* save_for_igvn = for_igvn();
2763 set_for_igvn(&new_worklist);
2764 igvn = PhaseIterGVN(initial_gvn());
2765 igvn.optimize();
2766 set_for_igvn(old_worklist); // new_worklist is dead beyond this point
2767 }
2768
2769 // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2770 // safepoints
2771 remove_root_to_sfpts_edges(igvn);
2772
2773 // Process inline type nodes now that all inlining is over
2774 process_inline_types(igvn);
2775
2776 adjust_flattened_array_access_aliases(igvn);
2777
2778 // Perform escape analysis
2779 if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2780 if (has_loops()) {
2781 // Cleanup graph (remove dead nodes).
2782 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2783 PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2784 if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2785 if (failing()) return;
2786 }
2787 bool progress;
2788 do {
2789 ConnectionGraph::do_analysis(this, &igvn);
2790
2791 if (failing()) return;
2792
2793 int mcount = macro_count(); // Record number of allocations and locks before IGVN
2794
2795 // Optimize out fields loads from scalar replaceable allocations.
2796 igvn.optimize();
2797 print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2871 print_method(PHASE_ITER_GVN2, 2);
2872
2873 if (failing()) return;
2874
2875 // Loop transforms on the ideal graph. Range Check Elimination,
2876 // peeling, unrolling, etc.
2877 if (!optimize_loops(igvn, LoopOptsDefault)) {
2878 return;
2879 }
2880
2881 if (failing()) return;
2882
2883 C->clear_major_progress(); // ensure that major progress is now clear
2884
2885 process_for_post_loop_opts_igvn(igvn);
2886
2887 #ifdef ASSERT
2888 bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2889 #endif
2890
2891 assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2892
2893 if (_late_inlines.length() > 0) {
2894 // More opportunities to optimize virtual and MH calls.
2895 // Though it may be too late to perform inlining, strength-reducing them to direct calls is still an option.
2896 process_late_inline_calls_no_inline(igvn);
2897 }
2898
2899 {
2900 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2901 PhaseMacroExpand mex(igvn);
2902 if (mex.expand_macro_nodes()) {
2903 assert(failing(), "must bail out w/ explicit message");
2904 return;
2905 }
2906 print_method(PHASE_MACRO_EXPANSION, 2);
2907 }
2908
2909 // Process inline type nodes again and remove them. From here
2910 // on we don't need to keep track of field values anymore.
2911 process_inline_types(igvn, /* remove= */ true);
2912
2913 {
2914 TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2915 if (bs->expand_barriers(this, igvn)) {
2916 assert(failing(), "must bail out w/ explicit message");
2917 return;
2918 }
2919 print_method(PHASE_BARRIER_EXPANSION, 2);
2920 }
2921
2922 if (C->max_vector_size() > 0) {
2923 C->optimize_logic_cones(igvn);
2924 igvn.optimize();
2925 }
2926
2927 DEBUG_ONLY( _modified_nodes = NULL; )
2928 DEBUG_ONLY( _late_inlines.clear(); )
2929
2930 assert(igvn._worklist.size() == 0, "not empty");
2931 } // (End scope of igvn; run destructor if necessary for asserts.)
2932
2933 check_no_dead_use();
2934
2935 process_print_inlining();
2936
2937 // A method with only infinite loops has no edges entering loops from root
2938 {
2939 TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2940 if (final_graph_reshaping()) {
2941 assert(failing(), "must bail out w/ explicit message");
2942 return;
2943 }
2944 }
2945
2946 print_method(PHASE_OPTIMIZE_FINISHED, 2);
2947 DEBUG_ONLY(set_phase_optimize_finished();)
2948 }
2949
2950 #ifdef ASSERT
3535 // Accumulate any precedence edges
3536 if (mem->in(i) != NULL) {
3537 n->add_prec(mem->in(i));
3538 }
3539 }
3540 // Everything above this point has been processed.
3541 done = true;
3542 }
3543 // Eliminate the previous StoreCM
3544 prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3545 assert(mem->outcnt() == 0, "should be dead");
3546 mem->disconnect_inputs(this);
3547 } else {
3548 prev = mem;
3549 }
3550 mem = prev->in(MemNode::Memory);
3551 }
3552 }
3553 }
3554
3555
3556 //------------------------------final_graph_reshaping_impl----------------------
3557 // Implement items 1-5 from final_graph_reshaping below.
3558 void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3559
3560 if ( n->outcnt() == 0 ) return; // dead node
3561 uint nop = n->Opcode();
3562
3563 // Check for 2-input instruction with "last use" on right input.
3564 // Swap to left input. Implements item (2).
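  // Informal example: for n = AddI(a, b) where a still has other uses but b is used only
  // by n, the swap below turns it into AddI(b, a); on a two-address machine the result
  // may then overwrite b's register, since this was b's last use.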
3565 if( n->req() == 3 && // two-input instruction
3566 n->in(1)->outcnt() > 1 && // left use is NOT a last use
3567 (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
3568 n->in(2)->outcnt() == 1 &&// right use IS a last use
3569 !n->in(2)->is_Con() ) { // right use is not a constant
3570 // Check for commutative opcode
3571 switch( nop ) {
3572 case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL:
3573 case Op_MaxI: case Op_MaxL: case Op_MaxF: case Op_MaxD:
3574 case Op_MinI: case Op_MinL: case Op_MinF: case Op_MinD:
3575 case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL:
3688 if (n->outcnt() > 1 &&
3689 !n->is_Proj() &&
3690 nop != Op_CreateEx &&
3691 nop != Op_CheckCastPP &&
3692 nop != Op_DecodeN &&
3693 nop != Op_DecodeNKlass &&
3694 !n->is_Mem() &&
3695 !n->is_Phi()) {
3696 Node *x = n->clone();
3697 call->set_req(TypeFunc::Parms, x);
3698 }
3699 }
3700 break;
3701 }
3702
3703 case Op_StoreCM:
3704 {
3705 // Convert OopStore dependence into precedence edge
3706 Node* prec = n->in(MemNode::OopStore);
3707 n->del_req(MemNode::OopStore);
3708 if (prec->is_MergeMem()) {
3709 MergeMemNode* mm = prec->as_MergeMem();
3710 Node* base = mm->base_memory();
3711 for (int i = AliasIdxRaw + 1; i < num_alias_types(); i++) {
3712 const Type* adr_type = get_adr_type(i);
3713 if (adr_type->isa_aryptr() && adr_type->is_aryptr()->is_flat()) {
3714 Node* m = mm->memory_at(i);
3715 n->add_prec(m);
3716 }
3717 }
3718 if (mm->outcnt() == 0) {
3719 mm->disconnect_inputs(this);
3720 }
3721 } else {
3722 n->add_prec(prec);
3723 }
3724 eliminate_redundant_card_marks(n);
3725 }
3726
3727 // fall through
3728
3729 case Op_StoreB:
3730 case Op_StoreC:
3731 case Op_StoreI:
3732 case Op_StoreL:
3733 case Op_CompareAndSwapB:
3734 case Op_CompareAndSwapS:
3735 case Op_CompareAndSwapI:
3736 case Op_CompareAndSwapL:
3737 case Op_CompareAndSwapP:
3738 case Op_CompareAndSwapN:
3739 case Op_WeakCompareAndSwapB:
3740 case Op_WeakCompareAndSwapS:
3741 case Op_WeakCompareAndSwapI:
3742 case Op_WeakCompareAndSwapL:
3743 case Op_WeakCompareAndSwapP:
4299 // Replace all nodes whose edges are identical to m's with m
4300 k->subsume_by(m, this);
4301 }
4302 }
4303 }
4304 break;
4305 }
4306 case Op_CmpUL: {
4307 if (!Matcher::has_match_rule(Op_CmpUL)) {
4308 // No support for unsigned long comparisons
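      // Informal sketch of the lowering below:
      //   CmpUL(a, b)  ==>  CmpL((a | (a >> 63)) & max_jlong, b)
      // The arithmetic shift yields 0 for non-negative a and all ones otherwise, so
      // non-negative values pass through unchanged while values with the sign bit set
      // saturate to max_jlong and compare above any non-negative b, which preserves the
      // unsigned ordering in the range-check style comparisons CmpUL is typically used for.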
4309 ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
4310 Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
4311 Node* orl = new OrLNode(n->in(1), sign_bit_mask);
4312 ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
4313 Node* andl = new AndLNode(orl, remove_sign_mask);
4314 Node* cmp = new CmpLNode(andl, n->in(2));
4315 n->subsume_by(cmp, this);
4316 }
4317 break;
4318 }
4319 #ifdef ASSERT
4320 case Op_InlineType: {
4321 n->dump(-1);
4322 assert(false, "inline type node was not removed");
4323 break;
4324 }
4325 #endif
4326 default:
4327 assert(!n->is_Call(), "");
4328 assert(!n->is_Mem(), "");
4329 assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
4330 break;
4331 }
4332 }
4333
4334 //------------------------------final_graph_reshaping_walk---------------------
4335 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
4336 // requires that the walk visits a node's inputs before visiting the node.
4337 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
4338 Unique_Node_List sfpt;
4339
4340 frc._visited.set(root->_idx); // first, mark node as visited
4341 uint cnt = root->req();
4342 Node *n = root;
4343 uint i = 0;
4344 while (true) {
4345 if (i < cnt) {
4682 }
4683 }
4684
4685 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4686 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4687 }
4688
4689 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4690 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4691 }
4692
4693 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4694 if (holder->is_initialized()) {
4695 return false;
4696 }
4697 if (holder->is_being_initialized()) {
4698 if (accessing_method->holder() == holder) {
4699 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4700 // <init>, or a static method. In all those cases, an initialization barrier
4701 // for the holder klass has already been passed.
4702 if (accessing_method->is_class_initializer() ||
4703 accessing_method->is_object_constructor() ||
4704 accessing_method->is_static()) {
4705 return false;
4706 }
4707 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4708 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4709 // In the case of <init> or a static method, a barrier on the subclass is not enough: the
4710 // child class can become fully initialized while its parent class is still being initialized.
4711 if (accessing_method->is_class_initializer()) {
4712 return false;
4713 }
4714 }
4715 ciMethod* root = method(); // the root method of compilation
4716 if (root != accessing_method) {
4717 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4718 }
4719 }
4720 return true;
4721 }
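// Illustrative example (informal): a static method of class A that reads one of A's
// static fields while A is still being initialized needs no barrier, since reaching the
// method already implies A's initialization was triggered; an <init> of a subclass B
// touching the same field still needs one, because B may finish initializing while A
// has not.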
4722
4723 #ifndef PRODUCT
4724 //------------------------------verify_bidirectional_edges---------------------
4725 // For each input edge to a node (i.e., for each Use-Def edge), verify that
4726 // there is a corresponding Def-Use edge.
4727 void Compile::verify_bidirectional_edges(Unique_Node_List &visited) {
4728 // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4729 uint stack_size = live_nodes() >> 4;
4730 Node_List nstack(MAX2(stack_size, (uint)OptoNodeListSize));
4731 nstack.push(_root);
4747 if (in != NULL && !in->is_top()) {
4748 // Count how many of in's out edges point to n
4749 int cnt = 0;
4750 for (uint idx = 0; idx < in->_outcnt; idx++) {
4751 if (in->_out[idx] == n) {
4752 cnt++;
4753 }
4754 }
4755 assert(cnt > 0, "Failed to find Def-Use edge.");
4756 // Check for duplicate edges
4757 // walk the input array downcounting the input edges to n
4758 for (uint j = 0; j < length; j++) {
4759 if (n->in(j) == in) {
4760 cnt--;
4761 }
4762 }
4763 assert(cnt == 0, "Mismatched edge count.");
4764 } else if (in == NULL) {
4765 assert(i == 0 || i >= n->req() ||
4766 n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4767 (n->is_Allocate() && i >= AllocateNode::InlineType) ||
4768 (n->is_Unlock() && i == (n->req() - 1)) ||
4769 (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4770 "only region, phi, arraycopy, allocate, unlock or membar nodes have null data edges");
4771 } else {
4772 assert(in->is_top(), "sanity");
4773 // Nothing to check.
4774 }
4775 }
4776 }
4777 }
4778
4779 //------------------------------verify_graph_edges---------------------------
4780 // Walk the Graph and verify that there is a one-to-one correspondence
4781 // between Use-Def edges and Def-Use edges in the graph.
4782 void Compile::verify_graph_edges(bool no_dead_code) {
4783 if (VerifyGraphEdges) {
4784 Unique_Node_List visited;
4785
4786 // Call graph walk to check edges
4787 verify_bidirectional_edges(visited);
4788 if (no_dead_code) {
4789 // Now make sure that no visited node is used by an unvisited node.
4790 bool dead_nodes = false;
4884 // (1) subklass is already limited to a subtype of superklass => always ok
4885 // (2) subklass does not overlap with superklass => always fail
4886 // (3) superklass has NO subtypes and we can check with a simple compare.
4887 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk) {
4888 if (StressReflectiveCode) {
4889 return SSC_full_test; // Let caller generate the general case.
4890 }
4891
4892 if (subk->is_java_subtype_of(superk)) {
4893 return SSC_always_true; // (0) and (1) this test cannot fail
4894 }
4895
4896 if (!subk->maybe_java_subtype_of(superk)) {
4897 return SSC_always_false; // (2) true path dead; no dynamic test needed
4898 }
4899
4900 const Type* superelem = superk;
4901 if (superk->isa_aryklassptr()) {
4902 int ignored;
4903 superelem = superk->is_aryklassptr()->base_element_type(ignored);
4904
4905 // Do not fold the subtype check to an array klass pointer comparison for [V? arrays.
4906 // [QMyValue is a subtype of [LMyValue but the klass for [QMyValue is not equal to
4907 // the klass for [LMyValue. Perform a full test.
4908 if (!superk->is_aryklassptr()->is_null_free() && superk->is_aryklassptr()->elem()->isa_instklassptr() &&
4909 superk->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->is_inlinetype()) {
4910 return SSC_full_test;
4911 }
4912 }
4913
4914 if (superelem->isa_instklassptr()) {
4915 ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4916 if (!ik->has_subklass()) {
4917 if (!ik->is_final()) {
4918 // Add a dependency if there is a chance of a later subclass.
4919 dependencies()->assert_leaf_type(ik);
4920 }
4921 if (!superk->maybe_java_subtype_of(subk)) {
4922 return SSC_always_false;
4923 }
4924 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4925 }
4926 } else {
4927 // A primitive array type has no subtypes.
4928 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4929 }
4930
4931 return SSC_full_test;
5443 const Type* t = igvn.type_or_null(n);
5444 assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
5445 if (n->is_Type()) {
5446 t = n->as_Type()->type();
5447 assert(t == t->remove_speculative(), "no more speculative types");
5448 }
5449 // Iterate over outs - endless loops are unreachable from below
5450 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5451 Node *m = n->fast_out(i);
5452 if (not_a_node(m)) {
5453 continue;
5454 }
5455 worklist.push(m);
5456 }
5457 }
5458 igvn.check_no_speculative_types();
5459 #endif
5460 }
5461 }
5462
5463 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
5464 const TypeInstPtr* ta = phase->type(a)->isa_instptr();
5465 const TypeInstPtr* tb = phase->type(b)->isa_instptr();
5466 if (!EnableValhalla || ta == NULL || tb == NULL ||
5467 ta->is_zero_type() || tb->is_zero_type() ||
5468 !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
5469 // Use old acmp if one operand is null or not an inline type
5470 return new CmpPNode(a, b);
5471 } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
5472 // We know that one operand is an inline type. Therefore,
5473 // new acmp will only return true if both operands are NULL.
5474 // Check if both operands are null by or'ing the oops.
5475 a = phase->transform(new CastP2XNode(NULL, a));
5476 b = phase->transform(new CastP2XNode(NULL, b));
5477 a = phase->transform(new OrXNode(a, b));
5478 return new CmpXNode(a, phase->MakeConX(0));
5479 }
5480 // Use new acmp
5481 return NULL;
5482 }
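// Informal sketch of the middle branch above: once one operand is statically known to
// be an inline type, equality is only possible when both operands are NULL, so the
// substituted test ORs the two raw pointer words and compares against zero; a single
// non-NULL operand already makes the OR non-zero and the comparison fail.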
5483
5484 // Auxiliary methods to support randomized stressing/fuzzing.
5485
5486 int Compile::random() {
5487 _stress_seed = os::next_random(_stress_seed);
5488 return static_cast<int>(_stress_seed);
5489 }
5490
5491 // This method can be called an arbitrary number of times, with the current count
5492 // as the argument. The logic allows selecting a single candidate from the
5493 // running list of candidates as follows:
5494 // int count = 0;
5495 // Cand* selected = null;
5496 // while(cand = cand->next()) {
5497 // if (randomized_select(++count)) {
5498 // selected = cand;
5499 // }
5500 // }
5501 //
5502 // Including count equalizes the chances any candidate is "selected".
5503 // This is useful when we don't have the complete list of candidates to choose