40 #include "gc/shared/barrierSet.hpp"
41 #include "gc/shared/c2/barrierSetC2.hpp"
42 #include "jfr/jfrEvents.hpp"
43 #include "jvm_io.h"
44 #include "memory/allocation.hpp"
45 #include "memory/resourceArea.hpp"
46 #include "opto/addnode.hpp"
47 #include "opto/block.hpp"
48 #include "opto/c2compiler.hpp"
49 #include "opto/callGenerator.hpp"
50 #include "opto/callnode.hpp"
51 #include "opto/castnode.hpp"
52 #include "opto/cfgnode.hpp"
53 #include "opto/chaitin.hpp"
54 #include "opto/compile.hpp"
55 #include "opto/connode.hpp"
56 #include "opto/convertnode.hpp"
57 #include "opto/divnode.hpp"
58 #include "opto/escape.hpp"
59 #include "opto/idealGraphPrinter.hpp"
60 #include "opto/locknode.hpp"
61 #include "opto/loopnode.hpp"
62 #include "opto/machnode.hpp"
63 #include "opto/macro.hpp"
64 #include "opto/matcher.hpp"
65 #include "opto/mathexactnode.hpp"
66 #include "opto/memnode.hpp"
67 #include "opto/mulnode.hpp"
68 #include "opto/narrowptrnode.hpp"
69 #include "opto/node.hpp"
70 #include "opto/opcodes.hpp"
71 #include "opto/output.hpp"
72 #include "opto/parse.hpp"
73 #include "opto/phaseX.hpp"
74 #include "opto/rootnode.hpp"
75 #include "opto/runtime.hpp"
76 #include "opto/stringopts.hpp"
77 #include "opto/type.hpp"
78 #include "opto/vector.hpp"
79 #include "opto/vectornode.hpp"
385 // as dead to be conservative about the dead node count at any
386 // given time.
387 if (!dead->is_Con()) {
388 record_dead_node(dead->_idx);
389 }
390 if (dead->is_macro()) {
391 remove_macro_node(dead);
392 }
393 if (dead->is_expensive()) {
394 remove_expensive_node(dead);
395 }
396 if (dead->is_OpaqueTemplateAssertionPredicate()) {
397 remove_template_assertion_predicate_opaq(dead);
398 }
399 if (dead->is_ParsePredicate()) {
400 remove_parse_predicate(dead->as_ParsePredicate());
401 }
402 if (dead->for_post_loop_opts_igvn()) {
403 remove_from_post_loop_opts_igvn(dead);
404 }
405 if (dead->is_Call()) {
406 remove_useless_late_inlines( &_late_inlines, dead);
407 remove_useless_late_inlines( &_string_late_inlines, dead);
408 remove_useless_late_inlines( &_boxing_late_inlines, dead);
409 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
410
411 if (dead->is_CallStaticJava()) {
412 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
413 }
414 }
415 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
416 bs->unregister_potential_barrier_node(dead);
417 }
418
419 // Disconnect all useless nodes by disconnecting those at the boundary.
420 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist) {
421 uint next = 0;
422 while (next < useful.size()) {
423 Node *n = useful.at(next++);
424 if (n->is_SafePoint()) {
426 // beyond that point.
427 n->as_SafePoint()->delete_replaced_nodes();
428 }
429 // Use raw traversal of out edges since this code removes out edges
430 int max = n->outcnt();
431 for (int j = 0; j < max; ++j) {
432 Node* child = n->raw_out(j);
433 if (!useful.member(child)) {
434 assert(!child->is_top() || child != top(),
435 "If top is cached in Compile object it is in useful list");
436 // Only need to remove this out-edge to the useless node
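// raw_del_out() compacts the out array by moving the last out-edge into slot j,
// hence slot j is re-examined and the bound is shrunk below.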
437 n->raw_del_out(j);
438 --j;
439 --max;
440 }
441 }
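// If the single remaining user forms a special pattern (e.g. back-to-back stores),
// it may now fold; queue it on the worklist so a later IGVN pass revisits it.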
442 if (n->outcnt() == 1 && n->has_special_unique_user()) {
443 assert(useful.member(n->unique_out()), "do not push a useless node");
444 worklist.push(n->unique_out());
445 }
446 }
447
448 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
449 remove_useless_nodes(_parse_predicates, useful); // remove useless Parse Predicate nodes
450 remove_useless_nodes(_template_assertion_predicate_opaqs, useful); // remove useless Assertion Predicate opaque nodes
451 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
452 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
453 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
454 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
455 #ifdef ASSERT
456 if (_modified_nodes != nullptr) {
457 _modified_nodes->remove_useless_nodes(useful.member_set());
458 }
459 #endif
460
461 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
462 bs->eliminate_useless_gc_barriers(useful, this);
463 // clean up the late inline lists
464 remove_useless_late_inlines( &_late_inlines, useful);
465 remove_useless_late_inlines( &_string_late_inlines, useful);
466 remove_useless_late_inlines( &_boxing_late_inlines, useful);
467 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
468 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
469 }
470
471 // ============================================================================
472 //------------------------------CompileWrapper---------------------------------
612
613
614 Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
615 Options options, DirectiveSet* directive)
616 : Phase(Compiler),
617 _compile_id(ci_env->compile_id()),
618 _options(options),
619 _method(target),
620 _entry_bci(osr_bci),
621 _ilt(nullptr),
622 _stub_function(nullptr),
623 _stub_name(nullptr),
624 _stub_entry_point(nullptr),
625 _max_node_limit(MaxNodeLimit),
626 _post_loop_opts_phase(false),
627 _allow_macro_nodes(true),
628 _inlining_progress(false),
629 _inlining_incrementally(false),
630 _do_cleanup(false),
631 _has_reserved_stack_access(target->has_reserved_stack_access()),
632 #ifndef PRODUCT
633 _igv_idx(0),
634 _trace_opto_output(directive->TraceOptoOutputOption),
635 #endif
636 _has_method_handle_invokes(false),
637 _clinit_barrier_on_entry(false),
638 _stress_seed(0),
639 _comp_arena(mtCompiler),
640 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
641 _env(ci_env),
642 _directive(directive),
643 _log(ci_env->log()),
644 _first_failure_details(nullptr),
645 _intrinsics (comp_arena(), 0, 0, nullptr),
646 _macro_nodes (comp_arena(), 8, 0, nullptr),
647 _parse_predicates (comp_arena(), 8, 0, nullptr),
648 _template_assertion_predicate_opaqs (comp_arena(), 8, 0, nullptr),
649 _expensive_nodes (comp_arena(), 8, 0, nullptr),
650 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
651 _unstable_if_traps (comp_arena(), 8, 0, nullptr),
652 _coarsened_locks (comp_arena(), 8, 0, nullptr),
653 _congraph(nullptr),
654 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
655 _unique(0),
656 _dead_node_count(0),
657 _dead_node_list(comp_arena()),
658 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
659 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
660 _node_arena(&_node_arena_one),
661 _mach_constant_base_node(nullptr),
662 _Compile_types(mtCompiler),
663 _initial_gvn(nullptr),
664 _igvn_worklist(nullptr),
665 _types(nullptr),
666 _node_hash(nullptr),
667 _late_inlines(comp_arena(), 2, 0, nullptr),
668 _string_late_inlines(comp_arena(), 2, 0, nullptr),
669 _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
670 _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr),
737
738 // GVN that will be run immediately on new nodes
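// Size the node hash table from the bytecode size (roughly four nodes per bytecode
// plus some slack), with MINIMUM_NODE_HASH as a floor.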
739 uint estimated_size = method()->code_size()*4+64;
740 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
741 _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
742 _types = new (comp_arena()) Type_Array(comp_arena());
743 _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
744 PhaseGVN gvn;
745 set_initial_gvn(&gvn);
746
747 print_inlining_init();
748 { // Scope for timing the parser
749 TracePhase tp("parse", &timers[_t_parser]);
750
751 // Put top into the hash table ASAP.
752 initial_gvn()->transform(top());
753
754 // Set up tf(), start(), and find a CallGenerator.
755 CallGenerator* cg = nullptr;
756 if (is_osr_compilation()) {
757 const TypeTuple *domain = StartOSRNode::osr_domain();
758 const TypeTuple *range = TypeTuple::make_range(method()->signature());
759 init_tf(TypeFunc::make(domain, range));
760 StartNode* s = new StartOSRNode(root(), domain);
761 initial_gvn()->set_type_bottom(s);
762 verify_start(s);
763 cg = CallGenerator::for_osr(method(), entry_bci());
764 } else {
765 // Normal case.
766 init_tf(TypeFunc::make(method()));
767 StartNode* s = new StartNode(root(), tf()->domain());
768 initial_gvn()->set_type_bottom(s);
769 verify_start(s);
770 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
771 // With java.lang.ref.Reference.get() we must go through the
772 // intrinsic - even when get() is the root
773 // method of the compile - so that, if necessary, the value in
774 // the referent field of the reference object gets recorded by
775 // the pre-barrier code.
776 cg = find_intrinsic(method(), false);
777 }
778 if (cg == nullptr) {
779 float past_uses = method()->interpreter_invocation_count();
780 float expected_uses = past_uses;
781 cg = CallGenerator::for_inline(method(), expected_uses);
782 }
783 }
784 if (failing()) return;
785 if (cg == nullptr) {
786 const char* reason = InlineTree::check_can_parse(method());
787 assert(reason != nullptr, "expect reason for parse failure");
858 print_ideal_ir("print_ideal");
859 }
860 #endif
861
862 #ifdef ASSERT
863 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
864 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
865 #endif
866
867 // Dump compilation data to replay it.
868 if (directive->DumpReplayOption) {
869 env()->dump_replay_data(_compile_id);
870 }
871 if (directive->DumpInlineOption && (ilt() != nullptr)) {
872 env()->dump_inline_data(_compile_id);
873 }
874
875 // Now that we know the size of all the monitors we can add a fixed slot
876 // for the original deopt pc.
877 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
878 set_fixed_slots(next_slot);
879
880 // Compute when to use implicit null checks. Used by matching trap based
881 // nodes and NullCheck optimization.
882 set_allowed_deopt_reasons();
883
884 // Now generate code
885 Code_Gen();
886 }
887
888 //------------------------------Compile----------------------------------------
889 // Compile a runtime stub
890 Compile::Compile( ciEnv* ci_env,
891 TypeFunc_generator generator,
892 address stub_function,
893 const char *stub_name,
894 int is_fancy_jump,
895 bool pass_tls,
896 bool return_pc,
897 DirectiveSet* directive)
898 : Phase(Compiler),
899 _compile_id(0),
900 _options(Options::for_runtime_stub()),
901 _method(nullptr),
902 _entry_bci(InvocationEntryBci),
903 _stub_function(stub_function),
904 _stub_name(stub_name),
905 _stub_entry_point(nullptr),
906 _max_node_limit(MaxNodeLimit),
907 _post_loop_opts_phase(false),
908 _allow_macro_nodes(true),
909 _inlining_progress(false),
910 _inlining_incrementally(false),
911 _has_reserved_stack_access(false),
912 #ifndef PRODUCT
913 _igv_idx(0),
914 _trace_opto_output(directive->TraceOptoOutputOption),
915 #endif
916 _has_method_handle_invokes(false),
917 _clinit_barrier_on_entry(false),
918 _stress_seed(0),
919 _comp_arena(mtCompiler),
920 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
921 _env(ci_env),
922 _directive(directive),
923 _log(ci_env->log()),
924 _first_failure_details(nullptr),
925 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
926 _congraph(nullptr),
927 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
928 _unique(0),
929 _dead_node_count(0),
930 _dead_node_list(comp_arena()),
931 _node_arena_one(mtCompiler),
1038
1039 _fixed_slots = 0;
1040 set_has_split_ifs(false);
1041 set_has_loops(false); // first approximation
1042 set_has_stringbuilder(false);
1043 set_has_boxed_value(false);
1044 _trap_can_recompile = false; // no traps emitted yet
1045 _major_progress = true; // start out assuming good things will happen
1046 set_has_unsafe_access(false);
1047 set_max_vector_size(0);
1048 set_clear_upper_avx(false); //false as default for clear upper bits of ymm registers
1049 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1050 set_decompile_count(0);
1051
1052 #ifndef PRODUCT
1053 Copy::zero_to_bytes(_igv_phase_iter, sizeof(_igv_phase_iter));
1054 #endif
1055
1056 set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1057 _loop_opts_cnt = LoopOptsCount;
1058 set_do_inlining(Inline);
1059 set_max_inline_size(MaxInlineSize);
1060 set_freq_inline_size(FreqInlineSize);
1061 set_do_scheduling(OptoScheduling);
1062
1063 set_do_vector_loop(false);
1064 set_has_monitors(false);
1065 set_has_scoped_access(false);
1066
1067 if (AllowVectorizeOnDemand) {
1068 if (has_method() && _directive->VectorizeOption) {
1069 set_do_vector_loop(true);
1070 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1071 } else if (has_method() && method()->name() != nullptr &&
1072 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1073 set_do_vector_loop(true);
1074 }
1075 }
1076 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
1077 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1309 // If this method has already thrown a range-check trap,
1310 // assume it was because we already tried range smearing
1311 // and it failed.
1312 uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1313 return !already_trapped;
1314 }
1315
1316
1317 //------------------------------flatten_alias_type-----------------------------
1318 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1319 assert(do_aliasing(), "Aliasing should be enabled");
1320 int offset = tj->offset();
1321 TypePtr::PTR ptr = tj->ptr();
1322
1323 // Known instance (scalarizable allocation) alias only with itself.
1324 bool is_known_inst = tj->isa_oopptr() != nullptr &&
1325 tj->is_oopptr()->is_known_instance();
1326
1327 // Process weird unsafe references.
1328 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1329 assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");
1330 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1331 tj = TypeOopPtr::BOTTOM;
1332 ptr = tj->ptr();
1333 offset = tj->offset();
1334 }
1335
1336 // Array pointers need some flattening
1337 const TypeAryPtr* ta = tj->isa_aryptr();
1338 if (ta && ta->is_stable()) {
1339 // Erase stability property for alias analysis.
1340 tj = ta = ta->cast_to_stable(false);
1341 }
1342 if( ta && is_known_inst ) {
1343 if ( offset != Type::OffsetBot &&
1344 offset > arrayOopDesc::length_offset_in_bytes() ) {
1345 offset = Type::OffsetBot; // Flatten constant access into array body only
1346 tj = ta = ta->
1347 remove_speculative()->
1348 cast_to_ptr_type(ptr)->
1349 with_offset(offset);
1350 }
1351 } else if (ta) {
1352 // For arrays indexed by constant indices, we flatten the alias
1353 // space to include all of the array body. Only the header, klass
1354 // and array length can be accessed un-aliased.
1355 if( offset != Type::OffsetBot ) {
1356 if( ta->const_oop() ) { // MethodData* or Method*
1357 offset = Type::OffsetBot; // Flatten constant access into array body
1358 tj = ta = ta->
1359 remove_speculative()->
1360 cast_to_ptr_type(ptr)->
1361 cast_to_exactness(false)->
1362 with_offset(offset);
1363 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1364 // range is OK as-is.
1365 tj = ta = TypeAryPtr::RANGE;
1366 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1367 tj = TypeInstPtr::KLASS; // all klass loads look alike
1368 ta = TypeAryPtr::RANGE; // generic ignored junk
1369 ptr = TypePtr::BotPTR;
1370 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1371 tj = TypeInstPtr::MARK;
1372 ta = TypeAryPtr::RANGE; // generic ignored junk
1373 ptr = TypePtr::BotPTR;
1374 } else { // Random constant offset into array body
1375 offset = Type::OffsetBot; // Flatten constant access into array body
1376 tj = ta = ta->
1377 remove_speculative()->
1378 cast_to_ptr_type(ptr)->
1379 cast_to_exactness(false)->
1380 with_offset(offset);
1381 }
1382 }
1383 // Arrays of fixed size alias with arrays of unknown size.
1384 if (ta->size() != TypeInt::POS) {
1385 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1386 tj = ta = ta->
1387 remove_speculative()->
1388 cast_to_ptr_type(ptr)->
1389 with_ary(tary)->
1390 cast_to_exactness(false);
1391 }
1392 // Arrays of known objects become arrays of unknown objects.
1393 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1394 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1395 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);
1396 }
1397 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1398 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1399 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);
1400 }
1401 // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1402 // cannot be distinguished by bytecode alone.
1403 if (ta->elem() == TypeInt::BOOL) {
1404 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1405 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1406 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1407 }
1408 // During the 2nd round of IterGVN, NotNull castings are removed.
1409 // Make sure the Bottom and NotNull variants alias the same.
1410 // Also, make sure exact and non-exact variants alias the same.
1411 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) {
1412 tj = ta = ta->
1413 remove_speculative()->
1414 cast_to_ptr_type(TypePtr::BotPTR)->
1415 cast_to_exactness(false)->
1416 with_offset(offset);
1417 }
1418 }
1419
1420 // Oop pointers need some flattening
1421 const TypeInstPtr *to = tj->isa_instptr();
1422 if (to && to != TypeOopPtr::BOTTOM) {
1423 ciInstanceKlass* ik = to->instance_klass();
1424 if( ptr == TypePtr::Constant ) {
1425 if (ik != ciEnv::current()->Class_klass() ||
1426 offset < ik->layout_helper_size_in_bytes()) {
1436 } else if( is_known_inst ) {
1437 tj = to; // Keep NotNull and klass_is_exact for instance type
1438 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1439 // During the 2nd round of IterGVN, NotNull castings are removed.
1440 // Make sure the Bottom and NotNull variants alias the same.
1441 // Also, make sure exact and non-exact variants alias the same.
1442 tj = to = to->
1443 remove_speculative()->
1444 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1445 cast_to_ptr_type(TypePtr::BotPTR)->
1446 cast_to_exactness(false);
1447 }
1448 if (to->speculative() != nullptr) {
1449 tj = to = to->remove_speculative();
1450 }
1451 // Canonicalize the holder of this field
1452 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1453 // First handle header references such as a LoadKlassNode, even if the
1454 // object's klass is unloaded at compile time (4965979).
1455 if (!is_known_inst) { // Do it only for non-instance types
1456 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, offset);
1457 }
1458 } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1459 // Static fields are in the space above the normal instance
1460 // fields in the java.lang.Class instance.
1461 if (ik != ciEnv::current()->Class_klass()) {
1462 to = nullptr;
1463 tj = TypeOopPtr::BOTTOM;
1464 offset = tj->offset();
1465 }
1466 } else {
1467 ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1468 assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1469 assert(tj->offset() == offset, "no change to offset expected");
1470 bool xk = to->klass_is_exact();
1471 int instance_id = to->instance_id();
1472
1473 // If the input type's class is the holder: if exact, the type only includes interfaces implemented by the holder
1474 // but if not exact, it may include extra interfaces: build new type from the holder class to make sure only
1475 // its interfaces are included.
1476 if (xk && ik->equals(canonical_holder)) {
1477 assert(tj == TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, offset, instance_id), "exact type should be canonical type");
1478 } else {
1479 assert(xk || !is_known_inst, "Known instance should be exact type");
1480 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, offset, instance_id);
1481 }
1482 }
1483 }
1484
1485 // Klass pointers to object array klasses need some flattening
1486 const TypeKlassPtr *tk = tj->isa_klassptr();
1487 if( tk ) {
1488 // If we are referencing a field within a Klass, we need
1489 // to assume the worst case of an Object. Both exact and
1490 // inexact types must flatten to the same alias class so
1491 // use NotNull as the PTR.
1492 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1493 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1494 env()->Object_klass(),
1495 offset);
1496 }
1497
1498 if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1499 ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1500 if (!k || !k->is_loaded()) { // Only fails for some -Xcomp runs
1501 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), offset);
1502 } else {
1503 tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, offset);
1504 }
1505 }
1506
1507 // Check for precise loads from the primary supertype array and force them
1508 // to the supertype cache alias index. Check for generic array loads from
1509 // the primary supertype array and also force them to the supertype cache
1510 // alias index. Since the same load can reach both, we need to merge
1511 // these 2 disparate memories into the same alias class. Since the
1512 // primary supertype array is read-only, there's no chance of confusion
1513 // where we bypass an array load and an array store.
1514 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1515 if (offset == Type::OffsetBot ||
1516 (offset >= primary_supers_offset &&
1517 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1518 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1519 offset = in_bytes(Klass::secondary_super_cache_offset());
1520 tj = tk = tk->with_offset(offset);
1521 }
1522 }
1523
1524 // Flatten all Raw pointers together.
1525 if (tj->base() == Type::RawPtr)
1526 tj = TypeRawPtr::BOTTOM;
1616 intptr_t key = (intptr_t) adr_type;
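// Fold the upper pointer bits into the low bits and mask down to the cache size,
// giving a cheap hash of the address-type pointer identity.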
1617 key ^= key >> logAliasCacheSize;
1618 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1619 }
1620
1621
1622 //-----------------------------grow_alias_types--------------------------------
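// Doubles the capacity of the alias-type table: the pointer array is arena-reallocated
// and freshly zeroed AliasType slots are appended.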
1623 void Compile::grow_alias_types() {
1624 const int old_ats = _max_alias_types; // how many before?
1625 const int new_ats = old_ats; // how many more?
1626 const int grow_ats = old_ats+new_ats; // how many now?
1627 _max_alias_types = grow_ats;
1628 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1629 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1630 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1631 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1632 }
1633
1634
1635 //--------------------------------find_alias_type------------------------------
1636 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1637 if (!do_aliasing()) {
1638 return alias_type(AliasIdxBot);
1639 }
1640
1641 AliasCacheEntry* ace = probe_alias_cache(adr_type);
1642 if (ace->_adr_type == adr_type) {
1643 return alias_type(ace->_index);
1644 }
1645
1646 // Handle special cases.
1647 if (adr_type == nullptr) return alias_type(AliasIdxTop);
1648 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1649
1650 // Do it the slow way.
1651 const TypePtr* flat = flatten_alias_type(adr_type);
1652
1653 #ifdef ASSERT
1654 {
1655 ResourceMark rm;
1656 assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1657 Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1658 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1659 Type::str(adr_type));
1660 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1661 const TypeOopPtr* foop = flat->is_oopptr();
1662 // Scalarizable allocations have exact klass always.
1663 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1673 if (alias_type(i)->adr_type() == flat) {
1674 idx = i;
1675 break;
1676 }
1677 }
1678
1679 if (idx == AliasIdxTop) {
1680 if (no_create) return nullptr;
1681 // Grow the array if necessary.
1682 if (_num_alias_types == _max_alias_types) grow_alias_types();
1683 // Add a new alias type.
1684 idx = _num_alias_types++;
1685 _alias_types[idx]->Init(idx, flat);
1686 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1687 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1688 if (flat->isa_instptr()) {
1689 if (flat->offset() == java_lang_Class::klass_offset()
1690 && flat->is_instptr()->instance_klass() == env()->Class_klass())
1691 alias_type(idx)->set_rewritable(false);
1692 }
1693 if (flat->isa_aryptr()) {
1694 #ifdef ASSERT
1695 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1696 // (T_BYTE has the weakest alignment and size restrictions...)
1697 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1698 #endif
1699 if (flat->offset() == TypePtr::OffsetBot) {
1700 alias_type(idx)->set_element(flat->is_aryptr()->elem());
1701 }
1702 }
1703 if (flat->isa_klassptr()) {
1704 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1705 alias_type(idx)->set_rewritable(false);
1706 if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1707 alias_type(idx)->set_rewritable(false);
1708 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1709 alias_type(idx)->set_rewritable(false);
1710 if (flat->offset() == in_bytes(Klass::misc_flags_offset()))
1711 alias_type(idx)->set_rewritable(false);
1712 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1713 alias_type(idx)->set_rewritable(false);
1714 if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1715 alias_type(idx)->set_rewritable(false);
1716 }
1717 // %%% (We would like to finalize JavaThread::threadObj_offset(),
1718 // but the base pointer type is not distinctive enough to identify
1719 // references into JavaThread.)
1720
1721 // Check for final fields.
1722 const TypeInstPtr* tinst = flat->isa_instptr();
1723 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1724 ciField* field;
1725 if (tinst->const_oop() != nullptr &&
1726 tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1727 tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1728 // static field
1729 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1730 field = k->get_field_by_offset(tinst->offset(), true);
1731 } else {
1732 ciInstanceKlass *k = tinst->instance_klass();
1733 field = k->get_field_by_offset(tinst->offset(), false);
1734 }
1735 assert(field == nullptr ||
1736 original_field == nullptr ||
1737 (field->holder() == original_field->holder() &&
1738 field->offset_in_bytes() == original_field->offset_in_bytes() &&
1739 field->is_static() == original_field->is_static()), "wrong field?");
1740 // Set field() and is_rewritable() attributes.
1741 if (field != nullptr) alias_type(idx)->set_field(field);
1742 }
1743 }
1744
1745 // Fill the cache for next time.
1746 ace->_adr_type = adr_type;
1747 ace->_index = idx;
1748 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1749
1750 // Might as well try to fill the cache for the flattened version, too.
1751 AliasCacheEntry* face = probe_alias_cache(flat);
1752 if (face->_adr_type == nullptr) {
1753 face->_adr_type = flat;
1754 face->_index = idx;
1755 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1756 }
1757
1758 return alias_type(idx);
1759 }
1760
1761
1762 Compile::AliasType* Compile::alias_type(ciField* field) {
1763 const TypeOopPtr* t;
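// Static fields live in the java.lang.Class mirror of the holder; instance fields
// live in instances of the holder klass.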
1764 if (field->is_static())
1765 t = TypeInstPtr::make(field->holder()->java_mirror());
1766 else
1767 t = TypeOopPtr::make_from_klass_raw(field->holder());
1768 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1769 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1770 return atp;
1771 }
1772
1773
1774 //------------------------------have_alias_type--------------------------------
1775 bool Compile::have_alias_type(const TypePtr* adr_type) {
1855 assert(!C->major_progress(), "not cleared");
1856
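// Release the nodes whose optimization was deferred until after loop opts back
// into IGVN and run it to a fixpoint.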
1857 if (_for_post_loop_igvn.length() > 0) {
1858 while (_for_post_loop_igvn.length() > 0) {
1859 Node* n = _for_post_loop_igvn.pop();
1860 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1861 igvn._worklist.push(n);
1862 }
1863 igvn.optimize();
1864 if (failing()) return;
1865 assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1866 assert(C->parse_predicate_count() == 0, "all parse predicates should have been removed now");
1867
1868 // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1869 if (C->major_progress()) {
1870 C->clear_major_progress(); // ensure that major progress is now clear
1871 }
1872 }
1873 }
1874
1875 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
1876 if (OptimizeUnstableIf) {
1877 _unstable_if_traps.append(trap);
1878 }
1879 }
1880
1881 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
1882 for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
1883 UnstableIfTrap* trap = _unstable_if_traps.at(i);
1884 Node* n = trap->uncommon_trap();
1885 if (!useful.member(n)) {
1886 _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
1887 }
1888 }
1889 }
1890
1891 // Remove the unstable if trap associated with 'unc' from candidates. It is either dead
1892 // or a fold-compares case. Return true on success or if the trap is not found.
1893 //
1894 // In rare cases, the found trap has been processed. It is too late to delete it. Return
1930 assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
1931 Bytecodes::Code c = iter.cur_bc();
1932 Node* lhs = nullptr;
1933 Node* rhs = nullptr;
1934 if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
1935 lhs = unc->peek_operand(0);
1936 rhs = unc->peek_operand(1);
1937 } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
1938 lhs = unc->peek_operand(0);
1939 }
1940
1941 ResourceMark rm;
1942 const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
1943 assert(live_locals.is_valid(), "broken liveness info");
1944 int len = (int)live_locals.size();
1945
1946 for (int i = 0; i < len; i++) {
1947 Node* local = unc->local(jvms, i);
1948 // Kill the local using the liveness of next_bci.
1949 // Give up when the local looks like an operand, to secure re-execution.
1950 if (!live_locals.at(i) && !local->is_top() && local != lhs && local!= rhs) {
1951 uint idx = jvms->locoff() + i;
1952 #ifdef ASSERT
1953 if (PrintOpto && Verbose) {
1954 tty->print("[unstable_if] kill local#%d: ", idx);
1955 local->dump();
1956 tty->cr();
1957 }
1958 #endif
1959 igvn.replace_input_of(unc, idx, top());
1960 modified = true;
1961 }
1962 }
1963 }
1964
1965 // keep the modified trap for late query
1966 if (modified) {
1967 trap->set_modified();
1968 } else {
1969 _unstable_if_traps.delete_at(i);
1970 }
1971 }
1972 igvn.optimize();
1973 }
1974
1975 // StringOpts and late inlining of string methods
1976 void Compile::inline_string_calls(bool parse_time) {
1977 {
1978 // remove useless nodes to make the usage analysis simpler
1979 ResourceMark rm;
1980 PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
1981 }
1982
1983 {
1984 ResourceMark rm;
1985 print_method(PHASE_BEFORE_STRINGOPTS, 3);
2140
2141 if (_string_late_inlines.length() > 0) {
2142 assert(has_stringbuilder(), "inconsistent");
2143
2144 inline_string_calls(false);
2145
2146 if (failing()) return;
2147
2148 inline_incrementally_cleanup(igvn);
2149 }
2150
2151 set_inlining_incrementally(false);
2152 }
2153
2154 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2155 // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2156 // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2157 // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr"
2158 // as if "inlining_incrementally() == true" were set.
2159 assert(inlining_incrementally() == false, "not allowed");
2160 assert(_modified_nodes == nullptr, "not allowed");
2161 assert(_late_inlines.length() > 0, "sanity");
2162
2163 while (_late_inlines.length() > 0) {
2164 igvn_worklist()->ensure_empty(); // should be done with igvn
2165
2166 while (inline_incrementally_one()) {
2167 assert(!failing_internal() || failure_is_artificial(), "inconsistent");
2168 }
2169 if (failing()) return;
2170
2171 inline_incrementally_cleanup(igvn);
2172 }
2173 }
2174
2175 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2176 if (_loop_opts_cnt > 0) {
2177 while (major_progress() && (_loop_opts_cnt > 0)) {
2178 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2179 PhaseIdealLoop::optimize(igvn, mode);
2180 _loop_opts_cnt--;
2181 if (failing()) return false;
2182 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2183 }
2184 }
2185 return true;
2186 }
2187
2188 // Remove edges from "root" to each SafePoint at a backward branch.
2189 // They were inserted during parsing (see add_safepoint()) to make
2190 // infinite loops without calls or exceptions visible to root, i.e.,
2191 // useful.
2192 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2299 print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2300 }
2301 assert(!has_vbox_nodes(), "sanity");
2302
2303 if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2304 Compile::TracePhase tp("", &timers[_t_renumberLive]);
2305 igvn_worklist()->ensure_empty(); // should be done with igvn
2306 {
2307 ResourceMark rm;
2308 PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
2309 }
2310 igvn.reset_from_gvn(initial_gvn());
2311 igvn.optimize();
2312 if (failing()) return;
2313 }
2314
2315 // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2316 // safepoints
2317 remove_root_to_sfpts_edges(igvn);
2318
2319 if (failing()) return;
2320
2321 // Perform escape analysis
2322 if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2323 if (has_loops()) {
2324 // Cleanup graph (remove dead nodes).
2325 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2326 PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2327 if (failing()) return;
2328 }
2329 bool progress;
2330 print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2331 do {
2332 ConnectionGraph::do_analysis(this, &igvn);
2333
2334 if (failing()) return;
2335
2336 int mcount = macro_count(); // Record number of allocations and locks before IGVN
2337
2338 // Optimize out fields loads from scalar replaceable allocations.
2422 if (failing()) return;
2423
2424 // Loop transforms on the ideal graph. Range Check Elimination,
2425 // peeling, unrolling, etc.
2426 if (!optimize_loops(igvn, LoopOptsDefault)) {
2427 return;
2428 }
2429
2430 if (failing()) return;
2431
2432 C->clear_major_progress(); // ensure that major progress is now clear
2433
2434 process_for_post_loop_opts_igvn(igvn);
2435
2436 if (failing()) return;
2437
2438 #ifdef ASSERT
2439 bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2440 #endif
2441
2442 {
2443 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2444 print_method(PHASE_BEFORE_MACRO_EXPANSION, 3);
2445 PhaseMacroExpand mex(igvn);
2446 if (mex.expand_macro_nodes()) {
2447 assert(failing(), "must bail out w/ explicit message");
2448 return;
2449 }
2450 print_method(PHASE_AFTER_MACRO_EXPANSION, 2);
2451 }
2452
2453 {
2454 TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2455 if (bs->expand_barriers(this, igvn)) {
2456 assert(failing(), "must bail out w/ explicit message");
2457 return;
2458 }
2459 print_method(PHASE_BARRIER_EXPANSION, 2);
2460 }
2461
2462 if (C->max_vector_size() > 0) {
2463 C->optimize_logic_cones(igvn);
2464 igvn.optimize();
2465 if (failing()) return;
2466 }
2467
2468 DEBUG_ONLY( _modified_nodes = nullptr; )
2469
2470 assert(igvn._worklist.size() == 0, "not empty");
2471
2472 assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2473
2474 if (_late_inlines.length() > 0) {
2475 // More opportunities to optimize virtual and MH calls.
2476 // Though it may be too late to perform inlining, strength-reducing them to direct calls is still an option.
2477 process_late_inline_calls_no_inline(igvn);
2478 if (failing()) return;
2479 }
2480 } // (End scope of igvn; run destructor if necessary for asserts.)
2481
2482 check_no_dead_use();
2483
2484 process_print_inlining();
2485
2486 // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
2487 // to remove hashes to unlock nodes for modifications.
2488 C->node_hash()->clear();
2489
2490 // A method with only infinite loops has no edges entering loops from root
2491 {
2492 TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2493 if (final_graph_reshaping()) {
2494 assert(failing(), "must bail out w/ explicit message");
2495 return;
2496 }
2497 }
2498
2499 print_method(PHASE_OPTIMIZE_FINISHED, 2);
3766 k->subsume_by(m, this);
3767 }
3768 }
3769 }
3770 break;
3771 }
3772 case Op_CmpUL: {
3773 if (!Matcher::has_match_rule(Op_CmpUL)) {
3774 // No support for unsigned long comparisons
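// Rewrite CmpUL(a, b) in terms of a signed CmpL: the arithmetic shift by 63 yields
// 0 for a non-negative 'a' and -1 otherwise; OR-ing it into 'a' and then masking
// with max_jlong leaves non-negative values unchanged and saturates negative
// (unsigned-large) values to max_jlong before the signed compare.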
3775 ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3776 Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3777 Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3778 ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3779 Node* andl = new AndLNode(orl, remove_sign_mask);
3780 Node* cmp = new CmpLNode(andl, n->in(2));
3781 n->subsume_by(cmp, this);
3782 }
3783 break;
3784 }
3785 #ifdef ASSERT
3786 case Op_ConNKlass: {
3787 const TypePtr* tp = n->as_Type()->type()->make_ptr();
3788 ciKlass* klass = tp->is_klassptr()->exact_klass();
3789 assert(klass->is_in_encoding_range(), "klass cannot be compressed");
3790 break;
3791 }
3792 #endif
3793 default:
3794 assert(!n->is_Call(), "");
3795 assert(!n->is_Mem(), "");
3796 assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3797 break;
3798 }
3799 }
3800
3801 //------------------------------final_graph_reshaping_walk---------------------
3802 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3803 // requires that the walk visits a node's inputs before visiting the node.
3804 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3805 Unique_Node_List sfpt;
4152 }
4153 }
4154
4155 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4156 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4157 }
4158
4159 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4160 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4161 }
4162
4163 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4164 if (holder->is_initialized()) {
4165 return false;
4166 }
4167 if (holder->is_being_initialized()) {
4168 if (accessing_method->holder() == holder) {
4169 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4170 // <init>, or a static method. In all those cases, there was an initialization
4171 // barrier on the holder klass passed.
4172 if (accessing_method->is_static_initializer() ||
4173 accessing_method->is_object_initializer() ||
4174 accessing_method->is_static()) {
4175 return false;
4176 }
4177 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4178 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4179 // In case of <init> or a static method, a barrier on the subclass is not enough:
4180 // the child class can become fully initialized while its parent class is still being initialized.
4181 if (accessing_method->is_static_initializer()) {
4182 return false;
4183 }
4184 }
4185 ciMethod* root = method(); // the root method of compilation
4186 if (root != accessing_method) {
4187 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4188 }
4189 }
4190 return true;
4191 }
4192
4193 #ifndef PRODUCT
4194 //------------------------------verify_bidirectional_edges---------------------
4195 // For each input edge to a node (ie - for each Use-Def edge), verify that
4196 // there is a corresponding Def-Use edge.
4197 void Compile::verify_bidirectional_edges(Unique_Node_List &visited) {
4198 // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4199 uint stack_size = live_nodes() >> 4;
4200 Node_List nstack(MAX2(stack_size, (uint)OptoNodeListSize));
4201 nstack.push(_root);
4217 if (in != nullptr && !in->is_top()) {
4218 // Count instances of `next`
4219 int cnt = 0;
4220 for (uint idx = 0; idx < in->_outcnt; idx++) {
4221 if (in->_out[idx] == n) {
4222 cnt++;
4223 }
4224 }
4225 assert(cnt > 0, "Failed to find Def-Use edge.");
4226 // Check for duplicate edges
4227 // walk the input array downcounting the input edges to n
4228 for (uint j = 0; j < length; j++) {
4229 if (n->in(j) == in) {
4230 cnt--;
4231 }
4232 }
4233 assert(cnt == 0, "Mismatched edge count.");
4234 } else if (in == nullptr) {
4235 assert(i == 0 || i >= n->req() ||
4236 n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4237 (n->is_Unlock() && i == (n->req() - 1)) ||
4238 (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4239 "only region, phi, arraycopy, unlock or membar nodes have null data edges");
4240 } else {
4241 assert(in->is_top(), "sanity");
4242 // Nothing to check.
4243 }
4244 }
4245 }
4246 }
4247
4248 //------------------------------verify_graph_edges---------------------------
4249 // Walk the Graph and verify that there is a one-to-one correspondence
4250 // between Use-Def edges and Def-Use edges in the graph.
4251 void Compile::verify_graph_edges(bool no_dead_code) {
4252 if (VerifyGraphEdges) {
4253 Unique_Node_List visited;
4254
4255 // Call graph walk to check edges
4256 verify_bidirectional_edges(visited);
4257 if (no_dead_code) {
4258 // Now make sure that no visited node is used by an unvisited node.
4259 bool dead_nodes = false;
4353 // (1) subklass is already limited to a subtype of superklass => always ok
4354 // (2) subklass does not overlap with superklass => always fail
4355 // (3) superklass has NO subtypes and we can check with a simple compare.
4356 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
4357 if (skip) {
4358 return SSC_full_test; // Let caller generate the general case.
4359 }
4360
4361 if (subk->is_java_subtype_of(superk)) {
4362 return SSC_always_true; // (0) and (1) this test cannot fail
4363 }
4364
4365 if (!subk->maybe_java_subtype_of(superk)) {
4366 return SSC_always_false; // (2) true path dead; no dynamic test needed
4367 }
4368
4369 const Type* superelem = superk;
4370 if (superk->isa_aryklassptr()) {
4371 int ignored;
4372 superelem = superk->is_aryklassptr()->base_element_type(ignored);
4373 }
4374
4375 if (superelem->isa_instklassptr()) {
4376 ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4377 if (!ik->has_subklass()) {
4378 if (!ik->is_final()) {
4379 // Add a dependency if there is a chance of a later subclass.
4380 dependencies()->assert_leaf_type(ik);
4381 }
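// Since ik has no subclasses, subk can only be a subtype of superk by being the
// same klass; if superk cannot possibly be a subtype of subk, that equality is
// impossible and the check always fails.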
4382 if (!superk->maybe_java_subtype_of(subk)) {
4383 return SSC_always_false;
4384 }
4385 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4386 }
4387 } else {
4388 // A primitive array type has no subtypes.
4389 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4390 }
4391
4392 return SSC_full_test;
4952 const Type* t = igvn.type_or_null(n);
4953 assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
4954 if (n->is_Type()) {
4955 t = n->as_Type()->type();
4956 assert(t == t->remove_speculative(), "no more speculative types");
4957 }
4958 // Iterate over outs - an endless loop is unreachable from below
4959 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4960 Node *m = n->fast_out(i);
4961 if (not_a_node(m)) {
4962 continue;
4963 }
4964 worklist.push(m);
4965 }
4966 }
4967 igvn.check_no_speculative_types();
4968 #endif
4969 }
4970 }
4971
4972 // Auxiliary methods to support randomized stressing/fuzzing.
4973
4974 void Compile::initialize_stress_seed(const DirectiveSet* directive) {
4975 if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
4976 _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
4977 FLAG_SET_ERGO(StressSeed, _stress_seed);
4978 } else {
4979 _stress_seed = StressSeed;
4980 }
4981 if (_log != nullptr) {
4982 _log->elem("stress_test seed='%u'", _stress_seed);
4983 }
4984 }
4985
4986 int Compile::random() {
4987 _stress_seed = os::next_random(_stress_seed);
4988 return static_cast<int>(_stress_seed);
4989 }
4990
4991 // This method can be called an arbitrary number of times, with current count
40 #include "gc/shared/barrierSet.hpp"
41 #include "gc/shared/c2/barrierSetC2.hpp"
42 #include "jfr/jfrEvents.hpp"
43 #include "jvm_io.h"
44 #include "memory/allocation.hpp"
45 #include "memory/resourceArea.hpp"
46 #include "opto/addnode.hpp"
47 #include "opto/block.hpp"
48 #include "opto/c2compiler.hpp"
49 #include "opto/callGenerator.hpp"
50 #include "opto/callnode.hpp"
51 #include "opto/castnode.hpp"
52 #include "opto/cfgnode.hpp"
53 #include "opto/chaitin.hpp"
54 #include "opto/compile.hpp"
55 #include "opto/connode.hpp"
56 #include "opto/convertnode.hpp"
57 #include "opto/divnode.hpp"
58 #include "opto/escape.hpp"
59 #include "opto/idealGraphPrinter.hpp"
60 #include "opto/inlinetypenode.hpp"
61 #include "opto/locknode.hpp"
62 #include "opto/loopnode.hpp"
63 #include "opto/machnode.hpp"
64 #include "opto/macro.hpp"
65 #include "opto/matcher.hpp"
66 #include "opto/mathexactnode.hpp"
67 #include "opto/memnode.hpp"
68 #include "opto/mulnode.hpp"
69 #include "opto/narrowptrnode.hpp"
70 #include "opto/node.hpp"
71 #include "opto/opcodes.hpp"
72 #include "opto/output.hpp"
73 #include "opto/parse.hpp"
74 #include "opto/phaseX.hpp"
75 #include "opto/rootnode.hpp"
76 #include "opto/runtime.hpp"
77 #include "opto/stringopts.hpp"
78 #include "opto/type.hpp"
79 #include "opto/vector.hpp"
80 #include "opto/vectornode.hpp"
386 // as dead to be conservative about the dead node count at any
387 // given time.
388 if (!dead->is_Con()) {
389 record_dead_node(dead->_idx);
390 }
391 if (dead->is_macro()) {
392 remove_macro_node(dead);
393 }
394 if (dead->is_expensive()) {
395 remove_expensive_node(dead);
396 }
397 if (dead->is_OpaqueTemplateAssertionPredicate()) {
398 remove_template_assertion_predicate_opaq(dead);
399 }
400 if (dead->is_ParsePredicate()) {
401 remove_parse_predicate(dead->as_ParsePredicate());
402 }
403 if (dead->for_post_loop_opts_igvn()) {
404 remove_from_post_loop_opts_igvn(dead);
405 }
406 if (dead->is_InlineType()) {
407 remove_inline_type(dead);
408 }
409 if (dead->is_Call()) {
410 remove_useless_late_inlines( &_late_inlines, dead);
411 remove_useless_late_inlines( &_string_late_inlines, dead);
412 remove_useless_late_inlines( &_boxing_late_inlines, dead);
413 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
414
415 if (dead->is_CallStaticJava()) {
416 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
417 }
418 }
419 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
420 bs->unregister_potential_barrier_node(dead);
421 }
422
423 // Disconnect all useless nodes by disconnecting those at the boundary.
424 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist) {
425 uint next = 0;
426 while (next < useful.size()) {
427 Node *n = useful.at(next++);
428 if (n->is_SafePoint()) {
430 // beyond that point.
431 n->as_SafePoint()->delete_replaced_nodes();
432 }
433 // Use raw traversal of out edges since this code removes out edges
434 int max = n->outcnt();
435 for (int j = 0; j < max; ++j) {
436 Node* child = n->raw_out(j);
437 if (!useful.member(child)) {
438 assert(!child->is_top() || child != top(),
439 "If top is cached in Compile object it is in useful list");
440 // Only need to remove this out-edge to the useless node
441 n->raw_del_out(j);
442 --j;
443 --max;
444 }
445 }
446 if (n->outcnt() == 1 && n->has_special_unique_user()) {
447 assert(useful.member(n->unique_out()), "do not push a useless node");
448 worklist.push(n->unique_out());
449 }
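// A node left without any uses is dead; queue it so IGVN can remove it.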
450 if (n->outcnt() == 0) {
451 worklist.push(n);
452 }
453 }
454
455 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
456 remove_useless_nodes(_parse_predicates, useful); // remove useless Parse Predicate nodes
457 remove_useless_nodes(_template_assertion_predicate_opaqs, useful); // remove useless Assertion Predicate opaque nodes
458 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
459 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
460 remove_useless_nodes(_inline_type_nodes, useful); // remove useless inline type nodes
461 #ifdef ASSERT
462 if (_modified_nodes != nullptr) {
463 _modified_nodes->remove_useless_nodes(useful.member_set());
464 }
465 #endif
466 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
467 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
468 #ifdef ASSERT
469 if (_modified_nodes != nullptr) {
470 _modified_nodes->remove_useless_nodes(useful.member_set());
471 }
472 #endif
473
474 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
475 bs->eliminate_useless_gc_barriers(useful, this);
476 // clean up the late inline lists
477 remove_useless_late_inlines( &_late_inlines, useful);
478 remove_useless_late_inlines( &_string_late_inlines, useful);
479 remove_useless_late_inlines( &_boxing_late_inlines, useful);
480 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
481 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
482 }
483
484 // ============================================================================
485 //------------------------------CompileWrapper---------------------------------
625
626
627 Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
628 Options options, DirectiveSet* directive)
629 : Phase(Compiler),
630 _compile_id(ci_env->compile_id()),
631 _options(options),
632 _method(target),
633 _entry_bci(osr_bci),
634 _ilt(nullptr),
635 _stub_function(nullptr),
636 _stub_name(nullptr),
637 _stub_entry_point(nullptr),
638 _max_node_limit(MaxNodeLimit),
639 _post_loop_opts_phase(false),
640 _allow_macro_nodes(true),
641 _inlining_progress(false),
642 _inlining_incrementally(false),
643 _do_cleanup(false),
644 _has_reserved_stack_access(target->has_reserved_stack_access()),
645 _has_circular_inline_type(false),
646 #ifndef PRODUCT
647 _igv_idx(0),
648 _trace_opto_output(directive->TraceOptoOutputOption),
649 #endif
650 _has_method_handle_invokes(false),
651 _clinit_barrier_on_entry(false),
652 _stress_seed(0),
653 _comp_arena(mtCompiler),
654 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
655 _env(ci_env),
656 _directive(directive),
657 _log(ci_env->log()),
658 _first_failure_details(nullptr),
659 _intrinsics (comp_arena(), 0, 0, nullptr),
660 _macro_nodes (comp_arena(), 8, 0, nullptr),
661 _parse_predicates (comp_arena(), 8, 0, nullptr),
662 _template_assertion_predicate_opaqs (comp_arena(), 8, 0, nullptr),
663 _expensive_nodes (comp_arena(), 8, 0, nullptr),
664 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
665 _inline_type_nodes (comp_arena(), 8, 0, nullptr),
666 _unstable_if_traps (comp_arena(), 8, 0, nullptr),
667 _coarsened_locks (comp_arena(), 8, 0, nullptr),
668 _congraph(nullptr),
669 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
670 _unique(0),
671 _dead_node_count(0),
672 _dead_node_list(comp_arena()),
673 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
674 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
675 _node_arena(&_node_arena_one),
676 _mach_constant_base_node(nullptr),
677 _Compile_types(mtCompiler),
678 _initial_gvn(nullptr),
679 _igvn_worklist(nullptr),
680 _types(nullptr),
681 _node_hash(nullptr),
682 _late_inlines(comp_arena(), 2, 0, nullptr),
683 _string_late_inlines(comp_arena(), 2, 0, nullptr),
684 _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
685 _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr),
752
753 // GVN that will be run immediately on new nodes
754 uint estimated_size = method()->code_size()*4+64;
755 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
756 _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
757 _types = new (comp_arena()) Type_Array(comp_arena());
758 _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
759 PhaseGVN gvn;
760 set_initial_gvn(&gvn);
761
762 print_inlining_init();
763 { // Scope for timing the parser
764 TracePhase tp("parse", &timers[_t_parser]);
765
766 // Put top into the hash table ASAP.
767 initial_gvn()->transform(top());
768
769 // Set up tf(), start(), and find a CallGenerator.
770 CallGenerator* cg = nullptr;
771 if (is_osr_compilation()) {
772 init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
773 StartNode* s = new StartOSRNode(root(), tf()->domain_sig());
774 initial_gvn()->set_type_bottom(s);
775 verify_start(s);
776 cg = CallGenerator::for_osr(method(), entry_bci());
777 } else {
778 // Normal case.
779 init_tf(TypeFunc::make(method()));
780 StartNode* s = new StartNode(root(), tf()->domain_cc());
781 initial_gvn()->set_type_bottom(s);
782 verify_start(s);
783 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
784 // With java.lang.ref.Reference.get() we must go through the
785 // intrinsic - even when get() is the root
786 // method of the compile - so that, if necessary, the value in
787 // the referent field of the reference object gets recorded by
788 // the pre-barrier code.
789 cg = find_intrinsic(method(), false);
790 }
791 if (cg == nullptr) {
792 float past_uses = method()->interpreter_invocation_count();
793 float expected_uses = past_uses;
794 cg = CallGenerator::for_inline(method(), expected_uses);
795 }
796 }
797 if (failing()) return;
798 if (cg == nullptr) {
799 const char* reason = InlineTree::check_can_parse(method());
800 assert(reason != nullptr, "expect reason for parse failure");
871 print_ideal_ir("print_ideal");
872 }
873 #endif
874
875 #ifdef ASSERT
876 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
877 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
878 #endif
879
880 // Dump compilation data to replay it.
881 if (directive->DumpReplayOption) {
882 env()->dump_replay_data(_compile_id);
883 }
884 if (directive->DumpInlineOption && (ilt() != nullptr)) {
885 env()->dump_inline_data(_compile_id);
886 }
887
888 // Now that we know the size of all the monitors we can add a fixed slot
889 // for the original deopt pc.
890 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
891 if (needs_stack_repair()) {
892 // One extra slot for the special stack increment value
893 next_slot += 2;
894 }
895 // TODO 8284443 Only reserve extra slot if needed
896 if (InlineTypeReturnedAsFields) {
897 // One extra slot to hold the IsInit information for a nullable
898 // inline type return if we run out of registers.
899 next_slot += 2;
900 }
901 set_fixed_slots(next_slot);
902
903 // Compute when to use implicit null checks. Used by matching trap based
904 // nodes and NullCheck optimization.
905 set_allowed_deopt_reasons();
906
907 // Now generate code
908 Code_Gen();
909 }
910
911 //------------------------------Compile----------------------------------------
912 // Compile a runtime stub
913 Compile::Compile( ciEnv* ci_env,
914 TypeFunc_generator generator,
915 address stub_function,
916 const char *stub_name,
917 int is_fancy_jump,
918 bool pass_tls,
919 bool return_pc,
920 DirectiveSet* directive)
921 : Phase(Compiler),
922 _compile_id(0),
923 _options(Options::for_runtime_stub()),
924 _method(nullptr),
925 _entry_bci(InvocationEntryBci),
926 _stub_function(stub_function),
927 _stub_name(stub_name),
928 _stub_entry_point(nullptr),
929 _max_node_limit(MaxNodeLimit),
930 _post_loop_opts_phase(false),
931 _allow_macro_nodes(true),
932 _inlining_progress(false),
933 _inlining_incrementally(false),
934 _has_reserved_stack_access(false),
935 _has_circular_inline_type(false),
936 #ifndef PRODUCT
937 _igv_idx(0),
938 _trace_opto_output(directive->TraceOptoOutputOption),
939 #endif
940 _has_method_handle_invokes(false),
941 _clinit_barrier_on_entry(false),
942 _stress_seed(0),
943 _comp_arena(mtCompiler),
944 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
945 _env(ci_env),
946 _directive(directive),
947 _log(ci_env->log()),
948 _first_failure_details(nullptr),
949 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
950 _congraph(nullptr),
951 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
952 _unique(0),
953 _dead_node_count(0),
954 _dead_node_list(comp_arena()),
955     _node_arena_one(mtCompiler, Arena::Tag::tag_node),
1062
1063 _fixed_slots = 0;
1064 set_has_split_ifs(false);
1065 set_has_loops(false); // first approximation
1066 set_has_stringbuilder(false);
1067 set_has_boxed_value(false);
1068 _trap_can_recompile = false; // no traps emitted yet
1069 _major_progress = true; // start out assuming good things will happen
1070 set_has_unsafe_access(false);
1071 set_max_vector_size(0);
1072   set_clear_upper_avx(false); // false by default: do not clear the upper bits of ymm registers
1073 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1074 set_decompile_count(0);
1075
1076 #ifndef PRODUCT
1077 Copy::zero_to_bytes(_igv_phase_iter, sizeof(_igv_phase_iter));
1078 #endif
1079
1080 set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1081 _loop_opts_cnt = LoopOptsCount;
1082 _has_flat_accesses = false;
1083 _flat_accesses_share_alias = true;
1084 _scalarize_in_safepoints = false;
1085
1086 set_do_inlining(Inline);
1087 set_max_inline_size(MaxInlineSize);
1088 set_freq_inline_size(FreqInlineSize);
1089 set_do_scheduling(OptoScheduling);
1090
1091 set_do_vector_loop(false);
1092 set_has_monitors(false);
1093 set_has_scoped_access(false);
1094
1095 if (AllowVectorizeOnDemand) {
1096 if (has_method() && _directive->VectorizeOption) {
1097 set_do_vector_loop(true);
1098 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1099 } else if (has_method() && method()->name() != nullptr &&
1100 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1101 set_do_vector_loop(true);
1102 }
1103 }
1104   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1105 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1337 // If this method has already thrown a range-check,
1338 // assume it was because we already tried range smearing
1339 // and it failed.
1340 uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1341 return !already_trapped;
1342 }
1343
1344
1345 //------------------------------flatten_alias_type-----------------------------
1346 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1347 assert(do_aliasing(), "Aliasing should be enabled");
1348 int offset = tj->offset();
1349 TypePtr::PTR ptr = tj->ptr();
1350
1351 // Known instance (scalarizable allocation) alias only with itself.
1352 bool is_known_inst = tj->isa_oopptr() != nullptr &&
1353 tj->is_oopptr()->is_known_instance();
1354
1355 // Process weird unsafe references.
1356 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1357 bool default_value_load = EnableValhalla && tj->is_instptr()->instance_klass() == ciEnv::current()->Class_klass();
1358 assert(InlineUnsafeOps || StressReflectiveCode || default_value_load, "indeterminate pointers come only from unsafe ops");
1359 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1360 tj = TypeOopPtr::BOTTOM;
1361 ptr = tj->ptr();
1362 offset = tj->offset();
1363 }
1364
1365 // Array pointers need some flattening
1366 const TypeAryPtr* ta = tj->isa_aryptr();
1367 if (ta && ta->is_stable()) {
1368 // Erase stability property for alias analysis.
1369 tj = ta = ta->cast_to_stable(false);
1370 }
1371 if (ta && ta->is_not_flat()) {
1372 // Erase not flat property for alias analysis.
1373 tj = ta = ta->cast_to_not_flat(false);
1374 }
1375 if (ta && ta->is_not_null_free()) {
1376 // Erase not null free property for alias analysis.
1377 tj = ta = ta->cast_to_not_null_free(false);
1378 }
1379
1380 if( ta && is_known_inst ) {
1381 if ( offset != Type::OffsetBot &&
1382 offset > arrayOopDesc::length_offset_in_bytes() ) {
1383 offset = Type::OffsetBot; // Flatten constant access into array body only
1384 tj = ta = ta->
1385 remove_speculative()->
1386 cast_to_ptr_type(ptr)->
1387 with_offset(offset);
1388 }
1389 } else if (ta) {
1390 // For arrays indexed by constant indices, we flatten the alias
1391 // space to include all of the array body. Only the header, klass
1392 // and array length can be accessed un-aliased.
1393 // For flat inline type array, each field has its own slice so
1394 // we must include the field offset.
1395 if( offset != Type::OffsetBot ) {
1396 if( ta->const_oop() ) { // MethodData* or Method*
1397 offset = Type::OffsetBot; // Flatten constant access into array body
1398 tj = ta = ta->
1399 remove_speculative()->
1400 cast_to_ptr_type(ptr)->
1401 cast_to_exactness(false)->
1402 with_offset(offset);
1403 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1404 // range is OK as-is.
1405 tj = ta = TypeAryPtr::RANGE;
1406 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1407 tj = TypeInstPtr::KLASS; // all klass loads look alike
1408 ta = TypeAryPtr::RANGE; // generic ignored junk
1409 ptr = TypePtr::BotPTR;
1410 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1411 tj = TypeInstPtr::MARK;
1412 ta = TypeAryPtr::RANGE; // generic ignored junk
1413 ptr = TypePtr::BotPTR;
1414 } else { // Random constant offset into array body
1415 offset = Type::OffsetBot; // Flatten constant access into array body
1416 tj = ta = ta->
1417 remove_speculative()->
1418 cast_to_ptr_type(ptr)->
1419 cast_to_exactness(false)->
1420 with_offset(offset);
1421 }
1422 }
1423 // Arrays of fixed size alias with arrays of unknown size.
1424 if (ta->size() != TypeInt::POS) {
1425 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1426 tj = ta = ta->
1427 remove_speculative()->
1428 cast_to_ptr_type(ptr)->
1429 with_ary(tary)->
1430 cast_to_exactness(false);
1431 }
1432 // Arrays of known objects become arrays of unknown objects.
1433 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1434 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1435 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), ta->field_offset());
1436 }
1437 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1438 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1439 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), ta->field_offset());
1440 }
1441 // Initially all flattened array accesses share a single slice
1442 if (ta->is_flat() && ta->elem() != TypeInstPtr::BOTTOM && _flat_accesses_share_alias) {
1443 const TypeAry* tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size(), /* stable= */ false, /* flat= */ true);
1444 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
1445 }
1446 // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1447 // cannot be distinguished by bytecode alone.
1448 if (ta->elem() == TypeInt::BOOL) {
1449 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1450 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1451 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
1452 }
1453 // During the 2nd round of IterGVN, NotNull castings are removed.
1454 // Make sure the Bottom and NotNull variants alias the same.
1455 // Also, make sure exact and non-exact variants alias the same.
1456 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) {
1457 tj = ta = ta->
1458 remove_speculative()->
1459 cast_to_ptr_type(TypePtr::BotPTR)->
1460 cast_to_exactness(false)->
1461 with_offset(offset);
1462 }
1463 }
1464
1465 // Oop pointers need some flattening
1466 const TypeInstPtr *to = tj->isa_instptr();
1467 if (to && to != TypeOopPtr::BOTTOM) {
1468 ciInstanceKlass* ik = to->instance_klass();
1469 if( ptr == TypePtr::Constant ) {
1470 if (ik != ciEnv::current()->Class_klass() ||
1471 offset < ik->layout_helper_size_in_bytes()) {
1481 } else if( is_known_inst ) {
1482 tj = to; // Keep NotNull and klass_is_exact for instance type
1483 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1484 // During the 2nd round of IterGVN, NotNull castings are removed.
1485 // Make sure the Bottom and NotNull variants alias the same.
1486 // Also, make sure exact and non-exact variants alias the same.
1487 tj = to = to->
1488 remove_speculative()->
1489 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1490 cast_to_ptr_type(TypePtr::BotPTR)->
1491 cast_to_exactness(false);
1492 }
1493 if (to->speculative() != nullptr) {
1494 tj = to = to->remove_speculative();
1495 }
1496 // Canonicalize the holder of this field
1497 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1498 // First handle header references such as a LoadKlassNode, even if the
1499 // object's klass is unloaded at compile time (4965979).
1500 if (!is_known_inst) { // Do it only for non-instance types
1501 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, Type::Offset(offset));
1502 }
1503 } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1504 // Static fields are in the space above the normal instance
1505 // fields in the java.lang.Class instance.
1506 if (ik != ciEnv::current()->Class_klass()) {
1507 to = nullptr;
1508 tj = TypeOopPtr::BOTTOM;
1509 offset = tj->offset();
1510 }
1511 } else {
1512 ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1513 assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1514 assert(tj->offset() == offset, "no change to offset expected");
1515 bool xk = to->klass_is_exact();
1516 int instance_id = to->instance_id();
1517
1518 // If the input type's class is the holder: if exact, the type only includes interfaces implemented by the holder
1519 // but if not exact, it may include extra interfaces: build new type from the holder class to make sure only
1520 // its interfaces are included.
1521 if (xk && ik->equals(canonical_holder)) {
1522 assert(tj == TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, Type::Offset(offset), instance_id), "exact type should be canonical type");
1523 } else {
1524 assert(xk || !is_known_inst, "Known instance should be exact type");
1525 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, Type::Offset(offset), instance_id);
1526 }
1527 }
1528 }
1529
1530 // Klass pointers to object array klasses need some flattening
1531 const TypeKlassPtr *tk = tj->isa_klassptr();
1532 if( tk ) {
1533 // If we are referencing a field within a Klass, we need
1534 // to assume the worst case of an Object. Both exact and
1535 // inexact types must flatten to the same alias class so
1536 // use NotNull as the PTR.
1537 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1538 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1539 env()->Object_klass(),
1540 Type::Offset(offset));
1541 }
1542
1543 if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1544 ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1545 if (!k || !k->is_loaded()) { // Only fails for some -Xcomp runs
1546 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), Type::Offset(offset));
1547 } else {
1548 tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, Type::Offset(offset), tk->is_not_flat(), tk->is_not_null_free(), tk->is_null_free());
1549 }
1550 }
1551 // Check for precise loads from the primary supertype array and force them
1552 // to the supertype cache alias index. Check for generic array loads from
1553 // the primary supertype array and also force them to the supertype cache
1554 // alias index. Since the same load can reach both, we need to merge
1555 // these 2 disparate memories into the same alias class. Since the
1556 // primary supertype array is read-only, there's no chance of confusion
1557 // where we bypass an array load and an array store.
1558 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1559 if (offset == Type::OffsetBot ||
1560 (offset >= primary_supers_offset &&
1561 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1562 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1563 offset = in_bytes(Klass::secondary_super_cache_offset());
1564 tj = tk = tk->with_offset(offset);
1565 }
1566 }
1567
1568 // Flatten all Raw pointers together.
1569 if (tj->base() == Type::RawPtr)
1570 tj = TypeRawPtr::BOTTOM;
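  // Fold the high bits of the address-type pointer into the low bits and mask
  // the result into the (power-of-two sized) alias cache.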
1660 intptr_t key = (intptr_t) adr_type;
1661 key ^= key >> logAliasCacheSize;
1662 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1663 }
1664
1665
1666 //-----------------------------grow_alias_types--------------------------------
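// Double the capacity of the alias type table: reallocate the pointer array
// and append a freshly zeroed block of AliasType entries for the new slots.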
1667 void Compile::grow_alias_types() {
1668 const int old_ats = _max_alias_types; // how many before?
1669 const int new_ats = old_ats; // how many more?
1670 const int grow_ats = old_ats+new_ats; // how many now?
1671 _max_alias_types = grow_ats;
1672 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1673 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1674 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1675 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1676 }
1677
1678
1679 //--------------------------------find_alias_type------------------------------
1680 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1681 if (!do_aliasing()) {
1682 return alias_type(AliasIdxBot);
1683 }
1684
1685 AliasCacheEntry* ace = nullptr;
1686 if (!uncached) {
1687 ace = probe_alias_cache(adr_type);
1688 if (ace->_adr_type == adr_type) {
1689 return alias_type(ace->_index);
1690 }
1691 }
1692
1693 // Handle special cases.
1694 if (adr_type == nullptr) return alias_type(AliasIdxTop);
1695 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1696
1697 // Do it the slow way.
1698 const TypePtr* flat = flatten_alias_type(adr_type);
1699
1700 #ifdef ASSERT
1701 {
1702 ResourceMark rm;
1703 assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1704 Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1705 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1706 Type::str(adr_type));
1707 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1708 const TypeOopPtr* foop = flat->is_oopptr();
1709 // Scalarizable allocations have exact klass always.
1710 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1720 if (alias_type(i)->adr_type() == flat) {
1721 idx = i;
1722 break;
1723 }
1724 }
1725
1726 if (idx == AliasIdxTop) {
1727 if (no_create) return nullptr;
1728 // Grow the array if necessary.
1729 if (_num_alias_types == _max_alias_types) grow_alias_types();
1730 // Add a new alias type.
1731 idx = _num_alias_types++;
1732 _alias_types[idx]->Init(idx, flat);
1733 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1734 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1735 if (flat->isa_instptr()) {
1736 if (flat->offset() == java_lang_Class::klass_offset()
1737 && flat->is_instptr()->instance_klass() == env()->Class_klass())
1738 alias_type(idx)->set_rewritable(false);
1739 }
1740 ciField* field = nullptr;
1741 if (flat->isa_aryptr()) {
1742 #ifdef ASSERT
1743 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1744 // (T_BYTE has the weakest alignment and size restrictions...)
1745 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1746 #endif
1747 const Type* elemtype = flat->is_aryptr()->elem();
1748 if (flat->offset() == TypePtr::OffsetBot) {
1749 alias_type(idx)->set_element(elemtype);
1750 }
1751 int field_offset = flat->is_aryptr()->field_offset().get();
1752 if (flat->is_flat() &&
1753 field_offset != Type::OffsetBot) {
1754 ciInlineKlass* vk = elemtype->inline_klass();
1755 field_offset += vk->first_field_offset();
1756 field = vk->get_field_by_offset(field_offset, false);
1757 }
1758 }
1759 if (flat->isa_klassptr()) {
1760 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1761 alias_type(idx)->set_rewritable(false);
1762 if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1763 alias_type(idx)->set_rewritable(false);
1764 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1765 alias_type(idx)->set_rewritable(false);
1766 if (flat->offset() == in_bytes(Klass::misc_flags_offset()))
1767 alias_type(idx)->set_rewritable(false);
1768 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1769 alias_type(idx)->set_rewritable(false);
1770 if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1771 alias_type(idx)->set_rewritable(false);
1772 if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1773 alias_type(idx)->set_rewritable(false);
1774 }
1775 // %%% (We would like to finalize JavaThread::threadObj_offset(),
1776 // but the base pointer type is not distinctive enough to identify
1777 // references into JavaThread.)
1778
1779 // Check for final fields.
1780 const TypeInstPtr* tinst = flat->isa_instptr();
1781 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1782 if (tinst->const_oop() != nullptr &&
1783 tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1784 tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1785 // static field
1786 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1787 field = k->get_field_by_offset(tinst->offset(), true);
1788 } else if (tinst->is_inlinetypeptr()) {
1789 // Inline type field
1790 ciInlineKlass* vk = tinst->inline_klass();
1791 field = vk->get_field_by_offset(tinst->offset(), false);
1792 } else {
1793 ciInstanceKlass *k = tinst->instance_klass();
1794 field = k->get_field_by_offset(tinst->offset(), false);
1795 }
1796 }
1797 assert(field == nullptr ||
1798 original_field == nullptr ||
1799 (field->holder() == original_field->holder() &&
1800 field->offset_in_bytes() == original_field->offset_in_bytes() &&
1801 field->is_static() == original_field->is_static()), "wrong field?");
1802 // Set field() and is_rewritable() attributes.
1803 if (field != nullptr) {
1804 alias_type(idx)->set_field(field);
1805 if (flat->isa_aryptr()) {
1806 // Fields of flat arrays are rewritable although they are declared final
1807 assert(flat->is_flat(), "must be a flat array");
1808 alias_type(idx)->set_rewritable(true);
1809 }
1810 }
1811 }
1812
1813 // Fill the cache for next time.
1814 if (!uncached) {
1815 ace->_adr_type = adr_type;
1816 ace->_index = idx;
1817 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1818
1819 // Might as well try to fill the cache for the flattened version, too.
1820 AliasCacheEntry* face = probe_alias_cache(flat);
1821 if (face->_adr_type == nullptr) {
1822 face->_adr_type = flat;
1823 face->_index = idx;
1824 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1825 }
1826 }
1827
1828 return alias_type(idx);
1829 }
1830
1831
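// Compute the alias type for a field access: static fields are addressed
// through the holder's java mirror, instance fields through the holder's oop
// type, with the field offset added before the lookup.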
1832 Compile::AliasType* Compile::alias_type(ciField* field) {
1833 const TypeOopPtr* t;
1834 if (field->is_static())
1835 t = TypeInstPtr::make(field->holder()->java_mirror());
1836 else
1837 t = TypeOopPtr::make_from_klass_raw(field->holder());
1838 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1839 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1840 return atp;
1841 }
1842
1843
1844 //------------------------------have_alias_type--------------------------------
1845 bool Compile::have_alias_type(const TypePtr* adr_type) {
1925 assert(!C->major_progress(), "not cleared");
1926
1927 if (_for_post_loop_igvn.length() > 0) {
1928 while (_for_post_loop_igvn.length() > 0) {
1929 Node* n = _for_post_loop_igvn.pop();
1930 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1931 igvn._worklist.push(n);
1932 }
1933 igvn.optimize();
1934 if (failing()) return;
1935 assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1936 assert(C->parse_predicate_count() == 0, "all parse predicates should have been removed now");
1937
1938 // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1939 if (C->major_progress()) {
1940 C->clear_major_progress(); // ensure that major progress is now clear
1941 }
1942 }
1943 }
1944
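// Bookkeeping for InlineType nodes: they are collected here so they can later
// be scalarized in safepoints and eventually replaced by their oop input
// (see process_inline_types()).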
1945 void Compile::add_inline_type(Node* n) {
1946 assert(n->is_InlineType(), "unexpected node");
1947 _inline_type_nodes.push(n);
1948 }
1949
1950 void Compile::remove_inline_type(Node* n) {
1951 assert(n->is_InlineType(), "unexpected node");
1952 if (_inline_type_nodes.contains(n)) {
1953 _inline_type_nodes.remove(n);
1954 }
1955 }
1956
1957 // Does the return value keep otherwise useless inline type allocations alive?
1958 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1959 ResourceMark rm;
1960 Unique_Node_List wq;
1961 wq.push(ret_val);
1962 bool some_allocations = false;
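  // Walk the graph producing the return value. Any node on this path with more
  // than one use has a consumer other than the return, so the allocation
  // cannot be considered useless.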
1963 for (uint i = 0; i < wq.size(); i++) {
1964 Node* n = wq.at(i);
1965 if (n->outcnt() > 1) {
1966 // Some other use for the allocation
1967 return false;
1968 } else if (n->is_InlineType()) {
1969 wq.push(n->in(1));
1970 } else if (n->is_Phi()) {
1971 for (uint j = 1; j < n->req(); j++) {
1972 wq.push(n->in(j));
1973 }
1974 } else if (n->is_CheckCastPP() &&
1975 n->in(1)->is_Proj() &&
1976 n->in(1)->in(0)->is_Allocate()) {
1977 some_allocations = true;
1978 } else if (n->is_CheckCastPP()) {
1979 wq.push(n->in(1));
1980 }
1981 }
1982 return some_allocations;
1983 }
1984
1985 void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
1986 // Make sure that the return value does not keep an otherwise unused allocation alive
1987 if (tf()->returns_inline_type_as_fields()) {
1988 Node* ret = nullptr;
1989 for (uint i = 1; i < root()->req(); i++) {
1990 Node* in = root()->in(i);
1991 if (in->Opcode() == Op_Return) {
1992 assert(ret == nullptr, "only one return");
1993 ret = in;
1994 }
1995 }
1996 if (ret != nullptr) {
1997 Node* ret_val = ret->in(TypeFunc::Parms);
1998 if (igvn.type(ret_val)->isa_oopptr() &&
1999 return_val_keeps_allocations_alive(ret_val)) {
2000 igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
2001 assert(ret_val->outcnt() == 0, "should be dead now");
2002 igvn.remove_dead_node(ret_val);
2003 }
2004 }
2005 }
2006 if (_inline_type_nodes.length() == 0) {
2007 return;
2008 }
2009 // Scalarize inline types in safepoint debug info.
2010 // Delay this until all inlining is over to avoid getting inconsistent debug info.
2011 set_scalarize_in_safepoints(true);
2012 for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
2013 InlineTypeNode* vt = _inline_type_nodes.at(i)->as_InlineType();
2014 vt->make_scalar_in_safepoints(&igvn);
2015 igvn.record_for_igvn(vt);
2016 }
2017 if (remove) {
2018 // Remove inline type nodes by replacing them with their oop input
2019 while (_inline_type_nodes.length() > 0) {
2020 InlineTypeNode* vt = _inline_type_nodes.pop()->as_InlineType();
2021 if (vt->outcnt() == 0) {
2022 igvn.remove_dead_node(vt);
2023 continue;
2024 }
2025 for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
2026 DEBUG_ONLY(bool must_be_buffered = false);
2027 Node* u = vt->out(i);
2028 // Check if any users are blackholes. If so, rewrite them to use either the
2029 // allocated buffer, or individual components, instead of the inline type node
2030 // that goes away.
2031 if (u->is_Blackhole()) {
2032 BlackholeNode* bh = u->as_Blackhole();
2033
2034 // Unlink the old input
2035 int idx = bh->find_edge(vt);
2036 assert(idx != -1, "The edge should be there");
2037 bh->del_req(idx);
2038 --i;
2039
2040 if (vt->is_allocated(&igvn)) {
2041 // Already has the allocated instance, blackhole that
2042 bh->add_req(vt->get_oop());
2043 } else {
2044 // Not allocated yet, blackhole the components
2045 for (uint c = 0; c < vt->field_count(); c++) {
2046 bh->add_req(vt->field_value(c));
2047 }
2048 }
2049
2050 // Node modified, record for IGVN
2051 igvn.record_for_igvn(bh);
2052 }
2053 #ifdef ASSERT
2054 // Verify that inline type is buffered when replacing by oop
2055 else if (u->is_InlineType()) {
2056 // InlineType uses don't need buffering because they are about to be replaced as well
2057 } else if (u->is_Phi()) {
2058 // TODO 8302217 Remove this once InlineTypeNodes are reliably pushed through
2059 } else {
2060 must_be_buffered = true;
2061 }
2062 if (must_be_buffered && !vt->is_allocated(&igvn)) {
2063 vt->dump(0);
2064 u->dump(0);
2065 assert(false, "Should have been buffered");
2066 }
2067 #endif
2068 }
2069 igvn.replace_node(vt, vt->get_oop());
2070 }
2071 }
2072 igvn.optimize();
2073 }
2074
2075 void Compile::adjust_flat_array_access_aliases(PhaseIterGVN& igvn) {
2076 if (!_has_flat_accesses) {
2077 return;
2078 }
2079 // Initially, all flat array accesses share the same slice to
2080 // keep dependencies with Object[] array accesses (that could be
2081 // to a flat array) correct. We're done with parsing so we
2082 // now know all flat array accesses in this compile
2083 // unit. Let's move flat array accesses to their own slice,
2084 // one per element field. This should help memory access
2085 // optimizations.
2086 ResourceMark rm;
2087 Unique_Node_List wq;
2088 wq.push(root());
2089
2090 Node_List mergememnodes;
2091 Node_List memnodes;
2092
2093 // Alias index currently shared by all flat memory accesses
2094 int index = get_alias_index(TypeAryPtr::INLINES);
2095
2096 // Find MergeMem nodes and flat array accesses
2097 for (uint i = 0; i < wq.size(); i++) {
2098 Node* n = wq.at(i);
2099 if (n->is_Mem()) {
2100 const TypePtr* adr_type = nullptr;
2101 adr_type = get_adr_type(get_alias_index(n->adr_type()));
2102 if (adr_type == TypeAryPtr::INLINES) {
2103 memnodes.push(n);
2104 }
2105 } else if (n->is_MergeMem()) {
2106 MergeMemNode* mm = n->as_MergeMem();
2107 if (mm->memory_at(index) != mm->base_memory()) {
2108 mergememnodes.push(n);
2109 }
2110 }
2111 for (uint j = 0; j < n->req(); j++) {
2112 Node* m = n->in(j);
2113 if (m != nullptr) {
2114 wq.push(m);
2115 }
2116 }
2117 }
2118
2119 if (memnodes.size() > 0) {
2120 _flat_accesses_share_alias = false;
2121
2122 // We are going to change the slice for the flat array
2123 // accesses so we need to clear the cache entries that refer to
2124 // them.
2125 for (uint i = 0; i < AliasCacheSize; i++) {
2126 AliasCacheEntry* ace = &_alias_cache[i];
2127 if (ace->_adr_type != nullptr &&
2128 ace->_adr_type->is_flat()) {
2129 ace->_adr_type = nullptr;
2130 ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the nullptr adr_type resolves to AliasIdxTop
2131 }
2132 }
2133
2134 // Find what aliases we are going to add
2135 int start_alias = num_alias_types()-1;
2136 int stop_alias = 0;
2137
2138 for (uint i = 0; i < memnodes.size(); i++) {
2139 Node* m = memnodes.at(i);
2140 const TypePtr* adr_type = nullptr;
2141 adr_type = m->adr_type();
2142 #ifdef ASSERT
2143 m->as_Mem()->set_adr_type(adr_type);
2144 #endif
2145 int idx = get_alias_index(adr_type);
2146 start_alias = MIN2(start_alias, idx);
2147 stop_alias = MAX2(stop_alias, idx);
2148 }
2149
2150 assert(stop_alias >= start_alias, "should have expanded aliases");
2151
2152 Node_Stack stack(0);
2153 #ifdef ASSERT
2154 VectorSet seen(Thread::current()->resource_area());
2155 #endif
2156 // Now let's fix the memory graph so each flat array access
2157 // is moved to the right slice. Start from the MergeMem nodes.
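    // Remember the node count before rewriting: place holder nodes created
    // below get indices >= 'last', which lets us tell them apart from
    // pre-existing uses.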
2158 uint last = unique();
2159 for (uint i = 0; i < mergememnodes.size(); i++) {
2160 MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
2161 Node* n = current->memory_at(index);
2162 MergeMemNode* mm = nullptr;
2163 do {
2164 // Follow memory edges through memory accesses, phis and
2165 // narrow membars and push nodes on the stack. Once we hit
2166         // bottom memory, we pop elements off the stack one at a
2167 // time, in reverse order, and move them to the right slice
2168 // by changing their memory edges.
2169 if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
2170 assert(!seen.test_set(n->_idx), "");
2171 // Uses (a load for instance) will need to be moved to the
2172 // right slice as well and will get a new memory state
2173 // that we don't know yet. The use could also be the
2174 // backedge of a loop. We put a place holder node between
2175 // the memory node and its uses. We replace that place
2176 // holder with the correct memory state once we know it,
2177 // i.e. when nodes are popped off the stack. Using the
2178         // place holder makes the logic work in the presence of
2179 // loops.
2180 if (n->outcnt() > 1) {
2181 Node* place_holder = nullptr;
2182 assert(!n->has_out_with(Op_Node), "");
2183 for (DUIterator k = n->outs(); n->has_out(k); k++) {
2184 Node* u = n->out(k);
2185 if (u != current && u->_idx < last) {
2186 bool success = false;
2187 for (uint l = 0; l < u->req(); l++) {
2188 if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
2189 continue;
2190 }
2191 Node* in = u->in(l);
2192 if (in == n) {
2193 if (place_holder == nullptr) {
2194 place_holder = new Node(1);
2195 place_holder->init_req(0, n);
2196 }
2197 igvn.replace_input_of(u, l, place_holder);
2198 success = true;
2199 }
2200 }
2201 if (success) {
2202 --k;
2203 }
2204 }
2205 }
2206 }
2207 if (n->is_Phi()) {
2208 stack.push(n, 1);
2209 n = n->in(1);
2210 } else if (n->is_Mem()) {
2211 stack.push(n, n->req());
2212 n = n->in(MemNode::Memory);
2213 } else {
2214 assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
2215 stack.push(n, n->req());
2216 n = n->in(0)->in(TypeFunc::Memory);
2217 }
2218 } else {
2219 assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
2220 // Build a new MergeMem node to carry the new memory state
2221 // as we build it. IGVN should fold extraneous MergeMem
2222 // nodes.
2223 mm = MergeMemNode::make(n);
2224 igvn.register_new_node_with_optimizer(mm);
2225 while (stack.size() > 0) {
2226 Node* m = stack.node();
2227 uint idx = stack.index();
2228 if (m->is_Mem()) {
2229 // Move memory node to its new slice
2230 const TypePtr* adr_type = m->adr_type();
2231 int alias = get_alias_index(adr_type);
2232 Node* prev = mm->memory_at(alias);
2233 igvn.replace_input_of(m, MemNode::Memory, prev);
2234 mm->set_memory_at(alias, m);
2235 } else if (m->is_Phi()) {
2236 // We need as many new phis as there are new aliases
2237 igvn.replace_input_of(m, idx, mm);
2238 if (idx == m->req()-1) {
2239 Node* r = m->in(0);
2240 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2241 const TypePtr* adr_type = get_adr_type(j);
2242 if (!adr_type->isa_aryptr() || !adr_type->is_flat() || j == (uint)index) {
2243 continue;
2244 }
2245 Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
2246 igvn.register_new_node_with_optimizer(phi);
2247 for (uint k = 1; k < m->req(); k++) {
2248 phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
2249 }
2250 mm->set_memory_at(j, phi);
2251 }
2252 Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2253 igvn.register_new_node_with_optimizer(base_phi);
2254 for (uint k = 1; k < m->req(); k++) {
2255 base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
2256 }
2257 mm->set_base_memory(base_phi);
2258 }
2259 } else {
2260 // This is a MemBarCPUOrder node from
2261 // Parse::array_load()/Parse::array_store(), in the
2262 // branch that handles flat arrays hidden under
2263 // an Object[] array. We also need one new membar per
2264 // new alias to keep the unknown access that the
2265 // membars protect properly ordered with accesses to
2266             // known flat arrays.
2267 assert(m->is_Proj(), "projection expected");
2268 Node* ctrl = m->in(0)->in(TypeFunc::Control);
2269 igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
2270 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2271 const TypePtr* adr_type = get_adr_type(j);
2272 if (!adr_type->isa_aryptr() || !adr_type->is_flat() || j == (uint)index) {
2273 continue;
2274 }
2275 MemBarNode* mb = new MemBarCPUOrderNode(this, j, nullptr);
2276 igvn.register_new_node_with_optimizer(mb);
2277 Node* mem = mm->memory_at(j);
2278 mb->init_req(TypeFunc::Control, ctrl);
2279 mb->init_req(TypeFunc::Memory, mem);
2280 ctrl = new ProjNode(mb, TypeFunc::Control);
2281 igvn.register_new_node_with_optimizer(ctrl);
2282 mem = new ProjNode(mb, TypeFunc::Memory);
2283 igvn.register_new_node_with_optimizer(mem);
2284 mm->set_memory_at(j, mem);
2285 }
2286 igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
2287 }
2288 if (idx < m->req()-1) {
2289 idx += 1;
2290 stack.set_index(idx);
2291 n = m->in(idx);
2292 break;
2293 }
2294 // Take care of place holder nodes
2295 if (m->has_out_with(Op_Node)) {
2296 Node* place_holder = m->find_out_with(Op_Node);
2297 if (place_holder != nullptr) {
2298 Node* mm_clone = mm->clone();
2299 igvn.register_new_node_with_optimizer(mm_clone);
2300 Node* hook = new Node(1);
2301 hook->init_req(0, mm);
2302 igvn.replace_node(place_holder, mm_clone);
2303 hook->destruct(&igvn);
2304 }
2305 assert(!m->has_out_with(Op_Node), "place holder should be gone now");
2306 }
2307 stack.pop();
2308 }
2309 }
2310 } while(stack.size() > 0);
2311 // Fix the memory state at the MergeMem we started from
2312 igvn.rehash_node_delayed(current);
2313 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2314 const TypePtr* adr_type = get_adr_type(j);
2315 if (!adr_type->isa_aryptr() || !adr_type->is_flat()) {
2316 continue;
2317 }
2318 current->set_memory_at(j, mm);
2319 }
2320 current->set_memory_at(index, current->base_memory());
2321 }
2322 igvn.optimize();
2323 }
2324 print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
2325 #ifdef ASSERT
2326 if (!_flat_accesses_share_alias) {
2327 wq.clear();
2328 wq.push(root());
2329 for (uint i = 0; i < wq.size(); i++) {
2330 Node* n = wq.at(i);
2331 assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
2332 for (uint j = 0; j < n->req(); j++) {
2333 Node* m = n->in(j);
2334 if (m != nullptr) {
2335 wq.push(m);
2336 }
2337 }
2338 }
2339 }
2340 #endif
2341 }
2342
2343 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
2344 if (OptimizeUnstableIf) {
2345 _unstable_if_traps.append(trap);
2346 }
2347 }
2348
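// Drop recorded unstable-if traps whose uncommon trap call did not survive as
// a useful node.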
2349 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
2350 for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
2351 UnstableIfTrap* trap = _unstable_if_traps.at(i);
2352 Node* n = trap->uncommon_trap();
2353 if (!useful.member(n)) {
2354 _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
2355 }
2356 }
2357 }
2358
2359 // Remove the unstable if trap associated with 'unc' from the candidates. It is removed either because
2360 // it is dead or because of the fold-compares case. Return true on success or if the trap was not found.
2361 //
2362 // In rare cases, the found trap has been processed. It is too late to delete it. Return
2398 assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
2399 Bytecodes::Code c = iter.cur_bc();
2400 Node* lhs = nullptr;
2401 Node* rhs = nullptr;
2402 if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
2403 lhs = unc->peek_operand(0);
2404 rhs = unc->peek_operand(1);
2405 } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
2406 lhs = unc->peek_operand(0);
2407 }
2408
2409 ResourceMark rm;
2410 const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
2411 assert(live_locals.is_valid(), "broken liveness info");
2412 int len = (int)live_locals.size();
2413
2414 for (int i = 0; i < len; i++) {
2415 Node* local = unc->local(jvms, i);
2416       // Kill the local using the liveness of next_bci.
2417       // To keep re-execution safe, give up when the local looks like an operand (lhs/rhs).
2418 if (!live_locals.at(i) && !local->is_top() && local != lhs && local != rhs) {
2419 uint idx = jvms->locoff() + i;
2420 #ifdef ASSERT
2421 if (PrintOpto && Verbose) {
2422 tty->print("[unstable_if] kill local#%d: ", idx);
2423 local->dump();
2424 tty->cr();
2425 }
2426 #endif
2427 igvn.replace_input_of(unc, idx, top());
2428 modified = true;
2429 }
2430 }
2431 }
2432
2433 // keep the modified trap for late query
2434 if (modified) {
2435 trap->set_modified();
2436 } else {
2437 _unstable_if_traps.delete_at(i);
2438 }
2439 }
2440 igvn.optimize();
2441 }
2442
2443 // StringOpts and late inlining of string methods
2444 void Compile::inline_string_calls(bool parse_time) {
2445 {
2446 // remove useless nodes to make the usage analysis simpler
2447 ResourceMark rm;
2448 PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
2449 }
2450
2451 {
2452 ResourceMark rm;
2453 print_method(PHASE_BEFORE_STRINGOPTS, 3);
2608
2609 if (_string_late_inlines.length() > 0) {
2610 assert(has_stringbuilder(), "inconsistent");
2611
2612 inline_string_calls(false);
2613
2614 if (failing()) return;
2615
2616 inline_incrementally_cleanup(igvn);
2617 }
2618
2619 set_inlining_incrementally(false);
2620 }
2621
2622 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2623 // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2624 // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2625 // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr"
2626 // as if "inlining_incrementally() == true" were set.
2627 assert(inlining_incrementally() == false, "not allowed");
2628 #ifdef ASSERT
2629 Unique_Node_List* modified_nodes = _modified_nodes;
2630 _modified_nodes = nullptr;
2631 #endif
2632 assert(_late_inlines.length() > 0, "sanity");
2633
2634 while (_late_inlines.length() > 0) {
2635 igvn_worklist()->ensure_empty(); // should be done with igvn
2636
2637 while (inline_incrementally_one()) {
2638 assert(!failing_internal() || failure_is_artificial(), "inconsistent");
2639 }
2640 if (failing()) return;
2641
2642 inline_incrementally_cleanup(igvn);
2643 }
2644 DEBUG_ONLY( _modified_nodes = modified_nodes; )
2645 }
2646
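// Run PhaseIdealLoop repeatedly while it keeps reporting major progress,
// bounded by the remaining loop-optimization budget (_loop_opts_cnt).
// Returns false if the compilation failed along the way.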
2647 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2648 if (_loop_opts_cnt > 0) {
2649 while (major_progress() && (_loop_opts_cnt > 0)) {
2650 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2651 PhaseIdealLoop::optimize(igvn, mode);
2652 _loop_opts_cnt--;
2653 if (failing()) return false;
2654 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2655 }
2656 }
2657 return true;
2658 }
2659
2660 // Remove edges from "root" to each SafePoint at a backward branch.
2661 // They were inserted during parsing (see add_safepoint()) to make
2662 // infinite loops without calls or exceptions visible to root, i.e.,
2663 // useful.
2664 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2771 print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2772 }
2773 assert(!has_vbox_nodes(), "sanity");
2774
2775 if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2776 Compile::TracePhase tp("", &timers[_t_renumberLive]);
2777 igvn_worklist()->ensure_empty(); // should be done with igvn
2778 {
2779 ResourceMark rm;
2780 PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
2781 }
2782 igvn.reset_from_gvn(initial_gvn());
2783 igvn.optimize();
2784 if (failing()) return;
2785 }
2786
2787   // Now that all inlining is over and no PhaseRemoveUseless will run, cut edges from root to loop
2788 // safepoints
2789 remove_root_to_sfpts_edges(igvn);
2790
2791 // Process inline type nodes now that all inlining is over
2792 process_inline_types(igvn);
2793
2794 adjust_flat_array_access_aliases(igvn);
2795
2796 if (failing()) return;
2797
2798 // Perform escape analysis
2799 if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2800 if (has_loops()) {
2801 // Cleanup graph (remove dead nodes).
2802 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2803 PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2804 if (failing()) return;
2805 }
2806 bool progress;
2807 print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2808 do {
2809 ConnectionGraph::do_analysis(this, &igvn);
2810
2811 if (failing()) return;
2812
2813 int mcount = macro_count(); // Record number of allocations and locks before IGVN
2814
2815 // Optimize out fields loads from scalar replaceable allocations.
2899 if (failing()) return;
2900
2901 // Loop transforms on the ideal graph. Range Check Elimination,
2902 // peeling, unrolling, etc.
2903 if (!optimize_loops(igvn, LoopOptsDefault)) {
2904 return;
2905 }
2906
2907 if (failing()) return;
2908
2909 C->clear_major_progress(); // ensure that major progress is now clear
2910
2911 process_for_post_loop_opts_igvn(igvn);
2912
2913 if (failing()) return;
2914
2915 #ifdef ASSERT
2916 bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2917 #endif
2918
2919 assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2920
2921 if (_late_inlines.length() > 0) {
2922 // More opportunities to optimize virtual and MH calls.
2923     // Though it may be too late to perform inlining, strength-reducing them to direct calls is still an option.
2924 process_late_inline_calls_no_inline(igvn);
2925 }
2926
2927 {
2928 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2929 print_method(PHASE_BEFORE_MACRO_EXPANSION, 3);
2930 PhaseMacroExpand mex(igvn);
2931 if (mex.expand_macro_nodes()) {
2932 assert(failing(), "must bail out w/ explicit message");
2933 return;
2934 }
2935 print_method(PHASE_AFTER_MACRO_EXPANSION, 2);
2936 }
2937
2938 // Process inline type nodes again and remove them. From here
2939 // on we don't need to keep track of field values anymore.
2940 process_inline_types(igvn, /* remove= */ true);
2941
2942 {
2943 TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2944 if (bs->expand_barriers(this, igvn)) {
2945 assert(failing(), "must bail out w/ explicit message");
2946 return;
2947 }
2948 print_method(PHASE_BARRIER_EXPANSION, 2);
2949 }
2950
2951 if (C->max_vector_size() > 0) {
2952 C->optimize_logic_cones(igvn);
2953 igvn.optimize();
2954 if (failing()) return;
2955 }
2956
2957 DEBUG_ONLY( _modified_nodes = nullptr; )
2958 DEBUG_ONLY( _late_inlines.clear(); )
2959
2960 assert(igvn._worklist.size() == 0, "not empty");
2961 } // (End scope of igvn; run destructor if necessary for asserts.)
2962
2963 check_no_dead_use();
2964
2965 process_print_inlining();
2966
2967 // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
2968 // to remove hashes to unlock nodes for modifications.
2969 C->node_hash()->clear();
2970
2971 // A method with only infinite loops has no edges entering loops from root
2972 {
2973 TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2974 if (final_graph_reshaping()) {
2975 assert(failing(), "must bail out w/ explicit message");
2976 return;
2977 }
2978 }
2979
2980 print_method(PHASE_OPTIMIZE_FINISHED, 2);
4247 k->subsume_by(m, this);
4248 }
4249 }
4250 }
4251 break;
4252 }
4253 case Op_CmpUL: {
4254 if (!Matcher::has_match_rule(Op_CmpUL)) {
4255 // No support for unsigned long comparisons
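        // The left operand is clamped into the non-negative range before a
        // signed CmpL is used: the arithmetic shift by 63 yields all ones when
        // the sign bit is set, the OR then saturates the value, and the AND
        // with max_jlong clamps it to the largest positive long; non-negative
        // values pass through unchanged.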
4256 ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
4257 Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
4258 Node* orl = new OrLNode(n->in(1), sign_bit_mask);
4259 ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
4260 Node* andl = new AndLNode(orl, remove_sign_mask);
4261 Node* cmp = new CmpLNode(andl, n->in(2));
4262 n->subsume_by(cmp, this);
4263 }
4264 break;
4265 }
4266 #ifdef ASSERT
4267 case Op_InlineType: {
4268 n->dump(-1);
4269 assert(false, "inline type node was not removed");
4270 break;
4271 }
4272 case Op_ConNKlass: {
4273 const TypePtr* tp = n->as_Type()->type()->make_ptr();
4274 ciKlass* klass = tp->is_klassptr()->exact_klass();
4275 assert(klass->is_in_encoding_range(), "klass cannot be compressed");
4276 break;
4277 }
4278 #endif
4279 default:
4280 assert(!n->is_Call(), "");
4281 assert(!n->is_Mem(), "");
4282 assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
4283 break;
4284 }
4285 }
4286
4287 //------------------------------final_graph_reshaping_walk---------------------
4288 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
4289 // requires that the walk visits a node's inputs before visiting the node.
4290 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
4291 Unique_Node_List sfpt;
4638 }
4639 }
4640
4641 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4642 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4643 }
4644
4645 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4646 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4647 }
4648
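// Decide whether accessing 'holder' from 'accessing_method' requires an
// explicit class initialization barrier. A fully initialized holder never
// does; for a holder that is still being initialized, the barrier can be
// elided only in the cases spelled out below.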
4649 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4650 if (holder->is_initialized()) {
4651 return false;
4652 }
4653 if (holder->is_being_initialized()) {
4654 if (accessing_method->holder() == holder) {
4655 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4656       // <init>, or a static method. In all those cases, an initialization
4657       // barrier on the holder klass has already been passed.
4658 if (accessing_method->is_class_initializer() ||
4659 accessing_method->is_object_constructor() ||
4660 accessing_method->is_static()) {
4661 return false;
4662 }
4663 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4664 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4665       // In case of <init> or a static method, a barrier on the subclass is not enough:
4666       // the child class can become fully initialized while its parent class is still being initialized.
4667 if (accessing_method->is_class_initializer()) {
4668 return false;
4669 }
4670 }
4671 ciMethod* root = method(); // the root method of compilation
4672 if (root != accessing_method) {
4673 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4674 }
4675 }
4676 return true;
4677 }
4678
4679 #ifndef PRODUCT
4680 //------------------------------verify_bidirectional_edges---------------------
4681 // For each input edge to a node (i.e., for each Use-Def edge), verify that
4682 // there is a corresponding Def-Use edge.
4683 void Compile::verify_bidirectional_edges(Unique_Node_List &visited) {
4684 // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4685 uint stack_size = live_nodes() >> 4;
4686 Node_List nstack(MAX2(stack_size, (uint)OptoNodeListSize));
4687 nstack.push(_root);
4703 if (in != nullptr && !in->is_top()) {
4704 // Count instances of `next`
4705 int cnt = 0;
4706 for (uint idx = 0; idx < in->_outcnt; idx++) {
4707 if (in->_out[idx] == n) {
4708 cnt++;
4709 }
4710 }
4711 assert(cnt > 0, "Failed to find Def-Use edge.");
4712 // Check for duplicate edges
4713 // walk the input array downcounting the input edges to n
4714 for (uint j = 0; j < length; j++) {
4715 if (n->in(j) == in) {
4716 cnt--;
4717 }
4718 }
4719 assert(cnt == 0, "Mismatched edge count.");
4720 } else if (in == nullptr) {
4721 assert(i == 0 || i >= n->req() ||
4722 n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4723 (n->is_Allocate() && i >= AllocateNode::InlineType) ||
4724 (n->is_Unlock() && i == (n->req() - 1)) ||
4725 (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4726 "only region, phi, arraycopy, allocate, unlock or membar nodes have null data edges");
4727 } else {
4728 assert(in->is_top(), "sanity");
4729 // Nothing to check.
4730 }
4731 }
4732 }
4733 }
4734
4735 //------------------------------verify_graph_edges---------------------------
4736 // Walk the Graph and verify that there is a one-to-one correspondence
4737 // between Use-Def edges and Def-Use edges in the graph.
4738 void Compile::verify_graph_edges(bool no_dead_code) {
4739 if (VerifyGraphEdges) {
4740 Unique_Node_List visited;
4741
4742 // Call graph walk to check edges
4743 verify_bidirectional_edges(visited);
4744 if (no_dead_code) {
4745 // Now make sure that no visited node is used by an unvisited node.
4746 bool dead_nodes = false;
4840 // (1) subklass is already limited to a subtype of superklass => always ok
4841 // (2) subklass does not overlap with superklass => always fail
4842 // (3) superklass has NO subtypes and we can check with a simple compare.
4843 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
4844 if (skip) {
4845 return SSC_full_test; // Let caller generate the general case.
4846 }
4847
4848 if (subk->is_java_subtype_of(superk)) {
4849 return SSC_always_true; // (0) and (1) this test cannot fail
4850 }
4851
4852 if (!subk->maybe_java_subtype_of(superk)) {
4853 return SSC_always_false; // (2) true path dead; no dynamic test needed
4854 }
4855
4856 const Type* superelem = superk;
4857 if (superk->isa_aryklassptr()) {
4858 int ignored;
4859 superelem = superk->is_aryklassptr()->base_element_type(ignored);
4860
4861     // Do not fold the subtype check to an array klass pointer comparison for nullable inline type arrays
4862     // because null-free [LMyValue <: nullable [LMyValue even though the klasses are different. Perform a full test.
4863 if (!superk->is_aryklassptr()->is_null_free() && superk->is_aryklassptr()->elem()->isa_instklassptr() &&
4864 superk->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->is_inlinetype()) {
4865 return SSC_full_test;
4866 }
4867 }
4868
4869 if (superelem->isa_instklassptr()) {
4870 ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4871 if (!ik->has_subklass()) {
4872 if (!ik->is_final()) {
4873 // Add a dependency if there is a chance of a later subclass.
4874 dependencies()->assert_leaf_type(ik);
4875 }
4876 if (!superk->maybe_java_subtype_of(subk)) {
4877 return SSC_always_false;
4878 }
4879 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4880 }
4881 } else {
4882 // A primitive array type has no subtypes.
4883 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4884 }
4885
4886 return SSC_full_test;
5446 const Type* t = igvn.type_or_null(n);
5447 assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
5448 if (n->is_Type()) {
5449 t = n->as_Type()->type();
5450 assert(t == t->remove_speculative(), "no more speculative types");
5451 }
5452     // Iterate over outs - endless loops are unreachable from below
5453 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5454 Node *m = n->fast_out(i);
5455 if (not_a_node(m)) {
5456 continue;
5457 }
5458 worklist.push(m);
5459 }
5460 }
5461 igvn.check_no_speculative_types();
5462 #endif
5463 }
5464 }
5465
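// Choose how to implement acmp on the given operands: fall back to the old
// pointer compare when an operand is null or cannot be an inline type, reduce
// to a both-null check when one operand is known to be an inline type, and
// return nullptr to signal that the new acmp sequence should be emitted.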
5466 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
5467 const TypeInstPtr* ta = phase->type(a)->isa_instptr();
5468 const TypeInstPtr* tb = phase->type(b)->isa_instptr();
5469 if (!EnableValhalla || ta == nullptr || tb == nullptr ||
5470 ta->is_zero_type() || tb->is_zero_type() ||
5471 !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
5472 // Use old acmp if one operand is null or not an inline type
5473 return new CmpPNode(a, b);
5474 } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
5475 // We know that one operand is an inline type. Therefore,
5476 // new acmp will only return true if both operands are nullptr.
5477 // Check if both operands are null by or'ing the oops.
5478 a = phase->transform(new CastP2XNode(nullptr, a));
5479 b = phase->transform(new CastP2XNode(nullptr, b));
5480 a = phase->transform(new OrXNode(a, b));
5481 return new CmpXNode(a, phase->MakeConX(0));
5482 }
5483 // Use new acmp
5484 return nullptr;
5485 }
5486
5487 // Auxiliary methods to support randomized stressing/fuzzing.
5488
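// Pick a seed for stress modes: a fresh seed is derived from the current time
// when StressSeed is at its default (or was only set ergonomically and the
// compilation is being repeated) and written back to the flag; otherwise the
// StressSeed value is used. Either way the seed is logged so a run can be
// reproduced.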
5489 void Compile::initialize_stress_seed(const DirectiveSet* directive) {
5490 if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
5491 _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
5492 FLAG_SET_ERGO(StressSeed, _stress_seed);
5493 } else {
5494 _stress_seed = StressSeed;
5495 }
5496 if (_log != nullptr) {
5497 _log->elem("stress_test seed='%u'", _stress_seed);
5498 }
5499 }
5500
5501 int Compile::random() {
5502 _stress_seed = os::next_random(_stress_seed);
5503 return static_cast<int>(_stress_seed);
5504 }
5505
5506 // This method can be called an arbitrary number of times, with current count