
src/hotspot/share/opto/compile.cpp

 551     tty->print_cr("*********************************************************");
 552     tty->print_cr("** Bailout: Recompile without boxing elimination       **");
 553     tty->print_cr("*********************************************************");
 554   }
 555   if ((do_locks_coarsening() != EliminateLocks) && PrintOpto) {
 556     // Recompiling without locks coarsening
 557     tty->print_cr("*********************************************************");
 558     tty->print_cr("** Bailout: Recompile without locks coarsening         **");
 559     tty->print_cr("*********************************************************");
 560   }
 561   if (env()->break_at_compile()) {
 562     // Open the debugger when compiling this method.
 563     tty->print("### Breaking when compiling: ");
 564     method()->print_short_name();
 565     tty->cr();
 566     BREAKPOINT;
 567   }
 568 
 569   if (PrintOpto) {
 570     if (is_osr_compilation()) {
 571       tty->print("[OSR]%3d", _compile_id);
 572     } else {
 573       tty->print("%3d", _compile_id);
 574     }
 575   }
 576 #endif
 577 }
 578 
 579 #ifndef PRODUCT
 580 void Compile::print_phase(const char* phase_name) {
 581   tty->print_cr("%u.\t%s", ++_phase_counter, phase_name);
 582 }
 583 
 584 void Compile::print_ideal_ir(const char* phase_name) {
 585   // keep the following output all in one block
 586   // This output goes directly to the tty, not the compiler log.
 587   // To enable tools to match it up with the compilation activity,
 588   // be sure to tag this tty output with the compile ID.
 589 
 590   // Node dumping can cause a safepoint, which can break the tty lock.
 591   // Buffer all node dumps, so that all safepoints happen before we lock.
 592   ResourceMark rm;
 593   stringStream ss;
 594 
 595   if (_output == nullptr) {
 596     ss.print_cr("AFTER: %s", phase_name);
 597     // Print out all nodes in ascending order of index.
 598     root()->dump_bfs(MaxNodeLimit, nullptr, "+S$", &ss);
 599   } else {
 601     // Dump the nodes blockwise if we have a schedule
 601     _output->print_scheduling(&ss);
 602   }
 603 
 604   // Check that the lock is not broken by a safepoint.
 605   NoSafepointVerifier nsv;
 606   ttyLocker ttyl;
 607   if (xtty != nullptr) {
 608     xtty->head("ideal compile_id='%d'%s compile_phase='%s'",
 609                compile_id(),
 610                is_osr_compilation() ? " compile_kind='osr'" : "",
 611                phase_name);
 612   }
 613 
 614   tty->print("%s", ss.as_string());
 615 
 616   if (xtty != nullptr) {
 617     xtty->tail("ideal");
 618   }
 619 }
 620 #endif
 621 
 622 // ============================================================================
 623 //------------------------------Compile standard-------------------------------
 624 
 625 // Compile a method.  entry_bci is -1 for normal compilations; otherwise it
 626 // indicates the continuation bci for on-stack replacement.
 627 
 628 
 629 Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci,
 630                  Options options, DirectiveSet* directive)

 635       _entry_bci(osr_bci),
 636       _ilt(nullptr),
 637       _stub_function(nullptr),
 638       _stub_name(nullptr),
 639       _stub_id(-1),
 640       _stub_entry_point(nullptr),
 641       _max_node_limit(MaxNodeLimit),
 642       _post_loop_opts_phase(false),
 643       _merge_stores_phase(false),
 644       _allow_macro_nodes(true),
 645       _inlining_progress(false),
 646       _inlining_incrementally(false),
 647       _do_cleanup(false),
 648       _has_reserved_stack_access(target->has_reserved_stack_access()),
 649 #ifndef PRODUCT
 650       _igv_idx(0),
 651       _trace_opto_output(directive->TraceOptoOutputOption),
 652 #endif
 653       _has_method_handle_invokes(false),
 654       _clinit_barrier_on_entry(false),
 655       _stress_seed(0),
 656       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 657       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 658       _env(ci_env),
 659       _directive(directive),
 660       _log(ci_env->log()),
 661       _first_failure_details(nullptr),
 662       _intrinsics(comp_arena(), 0, 0, nullptr),
 663       _macro_nodes(comp_arena(), 8, 0, nullptr),
 664       _parse_predicates(comp_arena(), 8, 0, nullptr),
 665       _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
 666       _expensive_nodes(comp_arena(), 8, 0, nullptr),
 667       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 668       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 669       _unstable_if_traps(comp_arena(), 8, 0, nullptr),
 670       _coarsened_locks(comp_arena(), 8, 0, nullptr),
 671       _congraph(nullptr),
 672       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 673           _unique(0),
 674       _dead_node_count(0),

 917       _options(Options::for_runtime_stub()),
 918       _method(nullptr),
 919       _entry_bci(InvocationEntryBci),
 920       _stub_function(stub_function),
 921       _stub_name(stub_name),
 922       _stub_id(stub_id),
 923       _stub_entry_point(nullptr),
 924       _max_node_limit(MaxNodeLimit),
 925       _post_loop_opts_phase(false),
 926       _merge_stores_phase(false),
 927       _allow_macro_nodes(true),
 928       _inlining_progress(false),
 929       _inlining_incrementally(false),
 930       _has_reserved_stack_access(false),
 931 #ifndef PRODUCT
 932       _igv_idx(0),
 933       _trace_opto_output(directive->TraceOptoOutputOption),
 934 #endif
 935       _has_method_handle_invokes(false),
 936       _clinit_barrier_on_entry(false),
 937       _stress_seed(0),
 938       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 939       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 940       _env(ci_env),
 941       _directive(directive),
 942       _log(ci_env->log()),
 943       _first_failure_details(nullptr),
 944       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 945       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 946       _congraph(nullptr),
 947       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 948           _unique(0),
 949       _dead_node_count(0),
 950       _dead_node_list(comp_arena()),
 951       _node_arena_one(mtCompiler, Arena::Tag::tag_node),
 952       _node_arena_two(mtCompiler, Arena::Tag::tag_node),
 953       _node_arena(&_node_arena_one),
 954       _mach_constant_base_node(nullptr),
 955       _Compile_types(mtCompiler, Arena::Tag::tag_type),
 956       _initial_gvn(nullptr),

1086   set_do_scheduling(OptoScheduling);
1087 
1088   set_do_vector_loop(false);
1089   set_has_monitors(false);
1090   set_has_scoped_access(false);
1091 
1092   if (AllowVectorizeOnDemand) {
1093     if (has_method() && _directive->VectorizeOption) {
1094       set_do_vector_loop(true);
1095       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1096     } else if (has_method() && method()->name() != nullptr &&
1097                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1098       set_do_vector_loop(true);
1099     }
1100   }
1101   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider having do_vector_loop() mandate use_cmove unconditionally
1102   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1103 
1104   _max_node_limit = _directive->MaxNodeLimitOption;
1105 
1106   if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {
1107     set_clinit_barrier_on_entry(true);
1108   }
1109   if (debug_info()->recording_non_safepoints()) {
1110     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1111                         (comp_arena(), 8, 0, nullptr));
1112     set_default_node_notes(Node_Notes::make(this));
1113   }
1114 
1115   const int grow_ats = 16;
1116   _max_alias_types = grow_ats;
1117   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1118   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1119   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1120   {
1121     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1122   }
1123   // Initialize the first few types.
1124   _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1125   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1126   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1127   _num_alias_types = AliasIdxRaw+1;

4115           dead_nodes.push(in);
4116         }
4117       }
4118       m->disconnect_inputs(this);
4119     }
4120   }
4121 
4122   set_java_calls(frc.get_java_call_count());
4123   set_inner_loops(frc.get_inner_loop_count());
4124 
4125   // No infinite loops, no reason to bail out.
4126   return false;
4127 }
4128 
4129 //-----------------------------too_many_traps----------------------------------
4130 // Report if there are too many traps at the current method and bci.
4131 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4132 bool Compile::too_many_traps(ciMethod* method,
4133                              int bci,
4134                              Deoptimization::DeoptReason reason) {
4135   ciMethodData* md = method->method_data();
4136   if (md->is_empty()) {
4137     // Assume the trap has not occurred, or that it occurred only
4138     // because of a transient condition during start-up in the interpreter.
4139     return false;
4140   }
4141   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4142   if (md->has_trap_at(bci, m, reason) != 0) {
4143     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4144     // Also, if there are multiple reasons, or if there is no per-BCI record,
4145     // assume the worst.
4146     if (log())
4147       log()->elem("observe trap='%s' count='%d'",
4148                   Deoptimization::trap_reason_name(reason),
4149                   md->trap_count(reason));
4150     return true;
4151   } else {
4152     // Ignore method/bci and see if there have been too many globally.
4153     return too_many_traps(reason, md);
4154   }
4155 }
4156 
4157 // Less-accurate variant which does not require a method and bci.
4158 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4159                              ciMethodData* logmd) {
4160   if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
4161     // Too many traps globally.
4162     // Note that we use cumulative trap_count, not just md->trap_count.
4163     if (log()) {
4164       int mcount = (logmd == nullptr)? -1: (int)logmd->trap_count(reason);
4165       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4166                   Deoptimization::trap_reason_name(reason),
4167                   mcount, trap_count(reason));
4168     }
4169     return true;
4170   } else {
4171     // The coast is clear.
4172     return false;
4173   }
4174 }
4175 
4176 //--------------------------too_many_recompiles--------------------------------
4177 // Report if there are too many recompiles at the current method and bci.
4178 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4179 // Is not eager to return true, since this will cause the compiler to use

4229   _allowed_reasons = 0;
4230   if (is_method_compilation()) {
4231     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4232       assert(rs < BitsPerInt, "recode bit map");
4233       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4234         _allowed_reasons |= nth_bit(rs);
4235       }
4236     }
4237   }
4238 }
4239 
4240 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4241   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4242 }
4243 
4244 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4245   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4246 }
4247 
4248 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4249   if (holder->is_initialized()) {
4250     return false;
4251   }
4252   if (holder->is_being_initialized()) {
4253     if (accessing_method->holder() == holder) {
4254       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4255       // <init>, or a static method. In all those cases, an initialization barrier
4256       // for the holder klass has already been passed.
4257       if (accessing_method->is_static_initializer() ||
4258           accessing_method->is_object_initializer() ||
4259           accessing_method->is_static()) {
4260         return false;
4261       }
4262     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4263       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4264       // In the case of <init> or a static method, a barrier on the subclass is not enough:
4265       // the child class can become fully initialized while its parent class is still being initialized.
4266       if (accessing_method->is_static_initializer()) {
4267         return false;
4268       }
4269     }
4270     ciMethod* root = method(); // the root method of compilation
4271     if (root != accessing_method) {
4272       return needs_clinit_barrier(holder, root); // check access in the context of compilation root

4532 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4533   if (ctrl != nullptr) {
4534     // Express control dependency by a CastII node with a narrow type.
4535     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4536     // node from floating above the range check during loop optimizations. Otherwise, the
4537     // ConvI2L node may be eliminated independently of the range check, causing the data path
4538     // to become TOP while the control path is still there (although it's unreachable).
4539     value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4540     value = phase->transform(value);
4541   }
4542   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4543   return phase->transform(new ConvI2LNode(value, ltype));
4544 }
4545 
4546 void Compile::dump_print_inlining() {
4547   inline_printer()->print_on(tty);
4548 }
4549 
4550 void Compile::log_late_inline(CallGenerator* cg) {
4551   if (log() != nullptr) {
4552     log()->head("late_inline method='%d'  inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4553                 cg->unique_id());
4554     JVMState* p = cg->call_node()->jvms();
4555     while (p != nullptr) {
4556       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4557       p = p->caller();
4558     }
4559     log()->tail("late_inline");
4560   }
4561 }
4562 
4563 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4564   log_late_inline(cg);
4565   if (log() != nullptr) {
4566     log()->inline_fail(msg);
4567   }
4568 }
4569 
4570 void Compile::log_inline_id(CallGenerator* cg) {
4571   if (log() != nullptr) {
4572     // The LogCompilation tool needs a unique way to identify late

 551     tty->print_cr("*********************************************************");
 552     tty->print_cr("** Bailout: Recompile without boxing elimination       **");
 553     tty->print_cr("*********************************************************");
 554   }
 555   if ((do_locks_coarsening() != EliminateLocks) && PrintOpto) {
 556     // Recompiling without locks coarsening
 557     tty->print_cr("*********************************************************");
 558     tty->print_cr("** Bailout: Recompile without locks coarsening         **");
 559     tty->print_cr("*********************************************************");
 560   }
 561   if (env()->break_at_compile()) {
 562     // Open the debugger when compiling this method.
 563     tty->print("### Breaking when compiling: ");
 564     method()->print_short_name();
 565     tty->cr();
 566     BREAKPOINT;
 567   }
 568 
 569   if (PrintOpto) {
 570     if (is_osr_compilation()) {
 571       tty->print("[OSR]");
 572     } else if (env()->task()->is_precompile()) {
 573       if (for_preload()) {
 574         tty->print("[PRE]");
 575       } else {
 576         tty->print("[AOT]");
 577       }
 578     }
 579     tty->print("%3d", _compile_id);
 580   }
 581 #endif
 582 }
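
For orientation, the PrintOpto block above now prefixes the compile id with a kind tag: "[OSR]" for on-stack-replacement compilations, and for precompile tasks "[PRE]" when for_preload() is set, otherwise "[AOT]". Roughly, with made-up compile ids and the "%3d" spacing:

  [OSR] 42    on-stack-replacement compilation
  [PRE]137    precompile task, for_preload()
  [AOT]138    precompile task, not for preload
   99         ordinary compilation (no tag)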
 583 
 584 #ifndef PRODUCT
 585 void Compile::print_phase(const char* phase_name) {
 586   tty->print_cr("%u.\t%s", ++_phase_counter, phase_name);
 587 }
 588 
 589 void Compile::print_ideal_ir(const char* phase_name) {
 590   // keep the following output all in one block
 591   // This output goes directly to the tty, not the compiler log.
 592   // To enable tools to match it up with the compilation activity,
 593   // be sure to tag this tty output with the compile ID.
 594 
 595   // Node dumping can cause a safepoint, which can break the tty lock.
 596   // Buffer all node dumps, so that all safepoints happen before we lock.
 597   ResourceMark rm;
 598   stringStream ss;
 599 
 600   if (_output == nullptr) {
 601     ss.print_cr("AFTER: %s", phase_name);
 602     // Print out all nodes in ascending order of index.
 603     root()->dump_bfs(MaxNodeLimit, nullptr, "+S$", &ss);
 604   } else {
 605     // Dump the nodes blockwise if we have a schedule
 606     _output->print_scheduling(&ss);
 607   }
 608 
 609   // Check that the lock is not broken by a safepoint.
 610   NoSafepointVerifier nsv;
 611   ttyLocker ttyl;
 612   if (xtty != nullptr) {
 613     xtty->head("ideal compile_id='%d'%s compile_phase='%s'",
 614                compile_id(),
 615                is_osr_compilation() ? " compile_kind='osr'" :
 616                (for_preload() ? " compile_kind='AP'" : ""),
 617                phase_name);
 618   }
 619 
 620   tty->print("%s", ss.as_string());
 621 
 622   if (xtty != nullptr) {
 623     xtty->tail("ideal");
 624   }
 625 }
 626 #endif
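
The comments in print_ideal_ir spell out a buffer-then-lock pattern: node dumping may safepoint, and a safepoint taken while the tty lock is held can break that lock, so everything is rendered into a stringStream before ttyLocker is acquired. A minimal sketch of that shape (HotSpot context assumed, headers omitted; dump_ir_somehow is a hypothetical stand-in for the dumping step):

  ResourceMark rm;
  stringStream ss;
  dump_ir_somehow(&ss);     // hypothetical dumper; may safepoint, nothing is locked yet
  NoSafepointVerifier nsv;  // from here on, a safepoint would break the invariant
  ttyLocker ttyl;           // now it is safe to hold the tty lock
  tty->print("%s", ss.as_string());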
 627 
 628 // ============================================================================
 629 //------------------------------Compile standard-------------------------------
 630 
 631 // Compile a method.  entry_bci is -1 for normal compilations; otherwise it
 632 // indicates the continuation bci for on-stack replacement.
 633 
 634 
 635 Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci,
 636                  Options options, DirectiveSet* directive)

 641       _entry_bci(osr_bci),
 642       _ilt(nullptr),
 643       _stub_function(nullptr),
 644       _stub_name(nullptr),
 645       _stub_id(-1),
 646       _stub_entry_point(nullptr),
 647       _max_node_limit(MaxNodeLimit),
 648       _post_loop_opts_phase(false),
 649       _merge_stores_phase(false),
 650       _allow_macro_nodes(true),
 651       _inlining_progress(false),
 652       _inlining_incrementally(false),
 653       _do_cleanup(false),
 654       _has_reserved_stack_access(target->has_reserved_stack_access()),
 655 #ifndef PRODUCT
 656       _igv_idx(0),
 657       _trace_opto_output(directive->TraceOptoOutputOption),
 658 #endif
 659       _has_method_handle_invokes(false),
 660       _clinit_barrier_on_entry(false),
 661       _has_clinit_barriers(false),
 662       _stress_seed(0),
 663       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 664       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 665       _env(ci_env),
 666       _directive(directive),
 667       _log(ci_env->log()),
 668       _first_failure_details(nullptr),
 669       _intrinsics(comp_arena(), 0, 0, nullptr),
 670       _macro_nodes(comp_arena(), 8, 0, nullptr),
 671       _parse_predicates(comp_arena(), 8, 0, nullptr),
 672       _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
 673       _expensive_nodes(comp_arena(), 8, 0, nullptr),
 674       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 675       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 676       _unstable_if_traps(comp_arena(), 8, 0, nullptr),
 677       _coarsened_locks(comp_arena(), 8, 0, nullptr),
 678       _congraph(nullptr),
 679       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 680           _unique(0),
 681       _dead_node_count(0),

 924       _options(Options::for_runtime_stub()),
 925       _method(nullptr),
 926       _entry_bci(InvocationEntryBci),
 927       _stub_function(stub_function),
 928       _stub_name(stub_name),
 929       _stub_id(stub_id),
 930       _stub_entry_point(nullptr),
 931       _max_node_limit(MaxNodeLimit),
 932       _post_loop_opts_phase(false),
 933       _merge_stores_phase(false),
 934       _allow_macro_nodes(true),
 935       _inlining_progress(false),
 936       _inlining_incrementally(false),
 937       _has_reserved_stack_access(false),
 938 #ifndef PRODUCT
 939       _igv_idx(0),
 940       _trace_opto_output(directive->TraceOptoOutputOption),
 941 #endif
 942       _has_method_handle_invokes(false),
 943       _clinit_barrier_on_entry(false),
 944       _has_clinit_barriers(false),
 945       _stress_seed(0),
 946       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 947       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 948       _env(ci_env),
 949       _directive(directive),
 950       _log(ci_env->log()),
 951       _first_failure_details(nullptr),
 952       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 953       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 954       _congraph(nullptr),
 955       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 956           _unique(0),
 957       _dead_node_count(0),
 958       _dead_node_list(comp_arena()),
 959       _node_arena_one(mtCompiler, Arena::Tag::tag_node),
 960       _node_arena_two(mtCompiler, Arena::Tag::tag_node),
 961       _node_arena(&_node_arena_one),
 962       _mach_constant_base_node(nullptr),
 963       _Compile_types(mtCompiler, Arena::Tag::tag_type),
 964       _initial_gvn(nullptr),

1094   set_do_scheduling(OptoScheduling);
1095 
1096   set_do_vector_loop(false);
1097   set_has_monitors(false);
1098   set_has_scoped_access(false);
1099 
1100   if (AllowVectorizeOnDemand) {
1101     if (has_method() && _directive->VectorizeOption) {
1102       set_do_vector_loop(true);
1103       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1104     } else if (has_method() && method()->name() != nullptr &&
1105                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1106       set_do_vector_loop(true);
1107     }
1108   }
1109   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider having do_vector_loop() mandate use_cmove unconditionally
1110   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1111 
1112   _max_node_limit = _directive->MaxNodeLimitOption;
1113 
1114   if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() &&
1115       (method()->needs_clinit_barrier() || (do_clinit_barriers() && method()->is_static()))) {
1116     set_clinit_barrier_on_entry(true);
1117     if (do_clinit_barriers()) {
1118       set_has_clinit_barriers(true); // Entry clinit barrier is in prolog code.
1119     }
1120   }
1121   if (debug_info()->recording_non_safepoints()) {
1122     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1123                         (comp_arena(), 8, 0, nullptr));
1124     set_default_node_notes(Node_Notes::make(this));
1125   }
1126 
1127   const int grow_ats = 16;
1128   _max_alias_types = grow_ats;
1129   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1130   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1131   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1132   {
1133     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1134   }
1135   // Initialize the first few types.
1136   _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1137   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1138   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1139   _num_alias_types = AliasIdxRaw+1;

4127           dead_nodes.push(in);
4128         }
4129       }
4130       m->disconnect_inputs(this);
4131     }
4132   }
4133 
4134   set_java_calls(frc.get_java_call_count());
4135   set_inner_loops(frc.get_inner_loop_count());
4136 
4137   // No infinite loops, no reason to bail out.
4138   return false;
4139 }
4140 
4141 //-----------------------------too_many_traps----------------------------------
4142 // Report if there are too many traps at the current method and bci.
4143 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4144 bool Compile::too_many_traps(ciMethod* method,
4145                              int bci,
4146                              Deoptimization::DeoptReason reason) {
4147   if (method->has_trap_at(bci)) {
4148     return true;
4149   }
4150   if (PreloadReduceTraps && for_preload()) {
4151     // Preload code should not have traps, if possible.
4152     return true;
4153   }
4154   ciMethodData* md = method->method_data();
4155   if (md->is_empty()) {
4156     // Assume the trap has not occurred, or that it occurred only
4157     // because of a transient condition during start-up in the interpreter.
4158     return false;
4159   }
4160   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4161   if (md->has_trap_at(bci, m, reason) != 0) {
4162     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4163     // Also, if there are multiple reasons, or if there is no per-BCI record,
4164     // assume the worst.
4165     if (log())
4166       log()->elem("observe trap='%s' count='%d'",
4167                   Deoptimization::trap_reason_name(reason),
4168                   md->trap_count(reason));
4169     return true;
4170   } else {
4171     // Ignore method/bci and see if there have been too many globally.
4172     return too_many_traps(reason, md);
4173   }
4174 }
4175 
4176 // Less-accurate variant which does not require a method and bci.
4177 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4178                              ciMethodData* logmd) {
4179   if (PreloadReduceTraps && for_preload()) {
4180     // Preload code should not have traps, if possible.
4181     return true;
4182   }
4183   if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
4184     // Too many traps globally.
4185     // Note that we use cumulative trap_count, not just md->trap_count.
4186     if (log()) {
4187       int mcount = (logmd == nullptr)? -1: (int)logmd->trap_count(reason);
4188       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4189                   Deoptimization::trap_reason_name(reason),
4190                   mcount, trap_count(reason));
4191     }
4192     return true;
4193   } else {
4194     // The coast is clear.
4195     return false;
4196   }
4197 }
4198 
4199 //--------------------------too_many_recompiles--------------------------------
4200 // Report if there are too many recompiles at the current method and bci.
4201 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4202 // Is not eager to return true, since this will cause the compiler to use

4252   _allowed_reasons = 0;
4253   if (is_method_compilation()) {
4254     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4255       assert(rs < BitsPerInt, "recode bit map");
4256       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4257         _allowed_reasons |= nth_bit(rs);
4258       }
4259     }
4260   }
4261 }
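
set_allowed_deopt_reasons folds the per-reason too_many_traps answers into one bit per reason in _allowed_reasons. A hedged sketch of how such a mask might be queried later (the helper is hypothetical; nth_bit, BitsPerInt, and the bit encoding come from the loop above):

  // Hypothetical query against the mask built by set_allowed_deopt_reasons().
  static bool reason_allowed(int allowed_reasons, Deoptimization::DeoptReason reason) {
    int rs = (int)reason;
    assert(rs < BitsPerInt, "same encoding as the bit map above");
    return (allowed_reasons & nth_bit(rs)) != 0;
  }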
4262 
4263 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4264   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4265 }
4266 
4267 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4268   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4269 }
4270 
4271 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4272   if (holder->is_initialized() && !do_clinit_barriers()) {
4273     return false;
4274   }
4275   if (holder->is_being_initialized() || do_clinit_barriers()) {
4276     if (accessing_method->holder() == holder) {
4277       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4278       // <init>, or a static method. In all those cases, an initialization barrier
4279       // for the holder klass has already been passed.
4280       if (accessing_method->is_static_initializer() ||
4281           accessing_method->is_object_initializer() ||
4282           accessing_method->is_static()) {
4283         return false;
4284       }
4285     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4286       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4287       // In the case of <init> or a static method, a barrier on the subclass is not enough:
4288       // the child class can become fully initialized while its parent class is still being initialized.
4289       if (accessing_method->is_static_initializer()) {
4290         return false;
4291       }
4292     }
4293     ciMethod* root = method(); // the root method of compilation
4294     if (root != accessing_method) {
4295       return needs_clinit_barrier(holder, root); // check access in the context of compilation root

4555 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4556   if (ctrl != nullptr) {
4557     // Express control dependency by a CastII node with a narrow type.
4558     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4559     // node from floating above the range check during loop optimizations. Otherwise, the
4560     // ConvI2L node may be eliminated independently of the range check, causing the data path
4561     // to become TOP while the control path is still there (although it's unreachable).
4562     value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4563     value = phase->transform(value);
4564   }
4565   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4566   return phase->transform(new ConvI2LNode(value, ltype));
4567 }
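
A hedged call-site sketch for constrained_convI2L, widening an index that a preceding range check has already bounded; idx, hi, range_check_ctrl, and the surrounding context are hypothetical, only the types and the function itself come from the code above:

  // idx is known to be in [0, hi] once range_check_ctrl is reached.
  const TypeInt* itype = TypeInt::make(0, hi, Type::WidenMin);
  Node* idx_long = constrained_convI2L(phase, idx, itype, range_check_ctrl,
                                       false /* carry_dependency */);
  // The CastII pinned on range_check_ctrl keeps the widened index from floating
  // above the range check during loop optimizations.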
4568 
4569 void Compile::dump_print_inlining() {
4570   inline_printer()->print_on(tty);
4571 }
4572 
4573 void Compile::log_late_inline(CallGenerator* cg) {
4574   if (log() != nullptr) {
4575     log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4576                 cg->unique_id());
4577     JVMState* p = cg->call_node()->jvms();
4578     while (p != nullptr) {
4579       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4580       p = p->caller();
4581     }
4582     log()->tail("late_inline");
4583   }
4584 }
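
Given the head/elem/tail calls above, each late inline should show up in the compilation log roughly as below (ids and bcis illustrative; any attributes the log adds on its own are omitted):

  <late_inline method='1094' inline_id='17'>
    <jvms bci='7' method='1094'/>
    <jvms bci='23' method='1021'/>
  </late_inline>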
4585 
4586 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4587   log_late_inline(cg);
4588   if (log() != nullptr) {
4589     log()->inline_fail(msg);
4590   }
4591 }
4592 
4593 void Compile::log_inline_id(CallGenerator* cg) {
4594   if (log() != nullptr) {
4595     // The LogCompilation tool needs a unique way to identify late