635 _entry_bci(osr_bci),
636 _ilt(nullptr),
637 _stub_function(nullptr),
638 _stub_name(nullptr),
639 _stub_id(-1),
640 _stub_entry_point(nullptr),
641 _max_node_limit(MaxNodeLimit),
642 _post_loop_opts_phase(false),
643 _merge_stores_phase(false),
644 _allow_macro_nodes(true),
645 _inlining_progress(false),
646 _inlining_incrementally(false),
647 _do_cleanup(false),
648 _has_reserved_stack_access(target->has_reserved_stack_access()),
649 #ifndef PRODUCT
650 _igv_idx(0),
651 _trace_opto_output(directive->TraceOptoOutputOption),
652 #endif
653 _has_method_handle_invokes(false),
654 _clinit_barrier_on_entry(false),
655 _stress_seed(0),
656 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
657 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
658 _env(ci_env),
659 _directive(directive),
660 _log(ci_env->log()),
661 _first_failure_details(nullptr),
662 _intrinsics(comp_arena(), 0, 0, nullptr),
663 _macro_nodes(comp_arena(), 8, 0, nullptr),
664 _parse_predicates(comp_arena(), 8, 0, nullptr),
665 _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
666 _expensive_nodes(comp_arena(), 8, 0, nullptr),
667 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
668 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
669 _unstable_if_traps(comp_arena(), 8, 0, nullptr),
670 _coarsened_locks(comp_arena(), 8, 0, nullptr),
671 _congraph(nullptr),
672 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
673 _unique(0),
674 _dead_node_count(0),
917 _options(Options::for_runtime_stub()),
918 _method(nullptr),
919 _entry_bci(InvocationEntryBci),
920 _stub_function(stub_function),
921 _stub_name(stub_name),
922 _stub_id(stub_id),
923 _stub_entry_point(nullptr),
924 _max_node_limit(MaxNodeLimit),
925 _post_loop_opts_phase(false),
926 _merge_stores_phase(false),
927 _allow_macro_nodes(true),
928 _inlining_progress(false),
929 _inlining_incrementally(false),
930 _has_reserved_stack_access(false),
931 #ifndef PRODUCT
932 _igv_idx(0),
933 _trace_opto_output(directive->TraceOptoOutputOption),
934 #endif
935 _has_method_handle_invokes(false),
936 _clinit_barrier_on_entry(false),
937 _stress_seed(0),
938 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
939 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
940 _env(ci_env),
941 _directive(directive),
942 _log(ci_env->log()),
943 _first_failure_details(nullptr),
944 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
945 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
946 _congraph(nullptr),
947 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
948 _unique(0),
949 _dead_node_count(0),
950 _dead_node_list(comp_arena()),
951 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
952 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
953 _node_arena(&_node_arena_one),
954 _mach_constant_base_node(nullptr),
955 _Compile_types(mtCompiler, Arena::Tag::tag_type),
956 _initial_gvn(nullptr),
1086 set_do_scheduling(OptoScheduling);
1087
1088 set_do_vector_loop(false);
1089 set_has_monitors(false);
1090 set_has_scoped_access(false);
1091
1092 if (AllowVectorizeOnDemand) {
1093 if (has_method() && _directive->VectorizeOption) {
1094 set_do_vector_loop(true);
1095 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1096 } else if (has_method() && method()->name() != nullptr &&
1097 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1098 set_do_vector_loop(true);
1099 }
1100 }
1101 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
1102 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1103
1104 _max_node_limit = _directive->MaxNodeLimitOption;
1105
1106 if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {
1107 set_clinit_barrier_on_entry(true);
1108 }
1109 if (debug_info()->recording_non_safepoints()) {
1110 set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1111 (comp_arena(), 8, 0, nullptr));
1112 set_default_node_notes(Node_Notes::make(this));
1113 }
1114
1115 const int grow_ats = 16;
1116 _max_alias_types = grow_ats;
1117 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1118 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1119 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1120 {
1121 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1122 }
1123 // Initialize the first few types.
1124 _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1125 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1126 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1127 _num_alias_types = AliasIdxRaw+1;
4115 dead_nodes.push(in);
4116 }
4117 }
4118 m->disconnect_inputs(this);
4119 }
4120 }
4121
4122 set_java_calls(frc.get_java_call_count());
4123 set_inner_loops(frc.get_inner_loop_count());
4124
4125 // No infinite loops, no reason to bail out.
4126 return false;
4127 }
4128
4129 //-----------------------------too_many_traps----------------------------------
4130 // Report if there are too many traps at the current method and bci.
4131 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4132 bool Compile::too_many_traps(ciMethod* method,
4133                              int bci,
4134                              Deoptimization::DeoptReason reason) {
4135   // Report whether traps for 'reason' at method/bci (or globally) have
4136   // exceeded their budget, meaning the compiler should not speculate here.
4137   ciMethodData* md = method->method_data();
4138   if (md->is_empty()) {
4139     // Assume the trap has not occurred, or that it occurred only
4140     // because of a transient condition during start-up in the interpreter.
4141     return false;
4142   }
4143   // For speculative reasons, attribute the trap record to the root method.
4144   ciMethod* m = nullptr;
4145   if (Deoptimization::reason_is_speculate(reason)) {
4146     m = this->method();
4147   }
4148   if (md->has_trap_at(bci, m, reason) == 0) {
4149     // No per-BCI record for this method; see if there have been too many globally.
4150     return too_many_traps(reason, md);
4151   }
4152   // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4153   // Also, if there are multiple reasons, or if there is no per-BCI record,
4154   // assume the worst.
4155   if (log() != nullptr) {
4156     log()->elem("observe trap='%s' count='%d'",
4157                 Deoptimization::trap_reason_name(reason),
4158                 md->trap_count(reason));
4159   }
4160   return true;
4161 }
4156
4157 // Less-accurate variant which does not require a method and bci.
4158 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4159                              ciMethodData* logmd) {
4160   // Less-accurate variant which does not require a method and bci: compare
4161   // the cumulative trap count for 'reason' against the per-method limit.
4162   if (trap_count(reason) < Deoptimization::per_method_trap_limit(reason)) {
4163     // The coast is clear.
4164     return false;
4165   }
4166   // Too many traps globally.
4167   // Note that we use cumulative trap_count, not just md->trap_count.
4168   if (log()) {
4169     int mcount = -1;
4170     if (logmd != nullptr) {
4171       mcount = (int)logmd->trap_count(reason);
4172     }
4173     log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4174                 Deoptimization::trap_reason_name(reason),
4175                 mcount, trap_count(reason));
4176   }
4177   return true;
4178 }
4175
4176 //--------------------------too_many_recompiles--------------------------------
4177 // Report if there are too many recompiles at the current method and bci.
4178 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4179 // Is not eager to return true, since this will cause the compiler to use
4229 _allowed_reasons = 0;
4230 if (is_method_compilation()) {
4231 for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4232 assert(rs < BitsPerInt, "recode bit map");
4233 if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4234 _allowed_reasons |= nth_bit(rs);
4235 }
4236 }
4237 }
4238 }
4239
4240 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4241   // Only a static method can require its holder class to be initialized on call.
4242   if (!method->is_static()) {
4243     return false;
4244   }
4245   return needs_clinit_barrier(method->holder(), accessing_method);
4246 }
4243
4244 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4245   // Only a static field access can require its holder class to be initialized.
4246   if (!field->is_static()) {
4247     return false;
4248   }
4249   return needs_clinit_barrier(field->holder(), accessing_method);
4250 }
4247
4248 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4249 if (holder->is_initialized()) {
4250 return false;
4251 }
4252 if (holder->is_being_initialized()) {
4253 if (accessing_method->holder() == holder) {
4254 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4255 // <init>, or a static method. In all those cases, there was an initialization
4256 // barrier on the holder klass passed.
4257 if (accessing_method->is_static_initializer() ||
4258 accessing_method->is_object_initializer() ||
4259 accessing_method->is_static()) {
4260 return false;
4261 }
4262 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4263 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4264     // In case of <init> or a static method, a barrier on the subclass alone is not enough:
4265 // child class can become fully initialized while its parent class is still being initialized.
4266 if (accessing_method->is_static_initializer()) {
4267 return false;
4268 }
4269 }
4270 ciMethod* root = method(); // the root method of compilation
4271 if (root != accessing_method) {
4272 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4532 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4533   // Convert an int value to long, constrained to 'itype'. If a control input
4534   // is supplied, pin the narrowed value below it with a CastII so the ConvI2L
4535   // cannot float above its range check during loop optimizations. Otherwise,
4536   // the ConvI2L node may be eliminated independently of the range check,
4537   // causing the data path to become TOP while the control path is still
4538   // there (although it's unreachable).
4539   if (ctrl != nullptr) {
4540     auto dependency = carry_dependency ? ConstraintCastNode::StrongDependency
4541                                        : ConstraintCastNode::RegularDependency;
4542     Node* cast = new CastIINode(ctrl, value, itype, dependency, true /* range check dependency */);
4543     value = phase->transform(cast);
4544   }
4545   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4546   return phase->transform(new ConvI2LNode(value, ltype));
4547 }
4545
4546 void Compile::dump_print_inlining() {
4547   // Flush the collected inlining decisions for this compilation to the tty.
4548   auto* printer = inline_printer();
4549   printer->print_on(tty);
4550 }
4549
4550 void Compile::log_late_inline(CallGenerator* cg) {
4551   // Emit a <late_inline> element for 'cg', including its JVM state chain,
4552   // so the LogCompilation tool can reconstruct the inlining context.
4553   if (log() == nullptr) {
4554     return;
4555   }
4556   log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'",
4557               log()->identify(cg->method()), cg->unique_id());
4558   for (JVMState* p = cg->call_node()->jvms(); p != nullptr; p = p->caller()) {
4559     log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4560   }
4561   log()->tail("late_inline");
4562 }
4562
4563 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4564   // Record the attempted late inline first, then the reason it failed.
4565   log_late_inline(cg);
4566   if (log() == nullptr) {
4567     return;
4568   }
4569   log()->inline_fail(msg);
4570 }
4569
4570 void Compile::log_inline_id(CallGenerator* cg) {
4571 if (log() != nullptr) {
4572 // The LogCompilation tool needs a unique way to identify late
|
635 _entry_bci(osr_bci),
636 _ilt(nullptr),
637 _stub_function(nullptr),
638 _stub_name(nullptr),
639 _stub_id(-1),
640 _stub_entry_point(nullptr),
641 _max_node_limit(MaxNodeLimit),
642 _post_loop_opts_phase(false),
643 _merge_stores_phase(false),
644 _allow_macro_nodes(true),
645 _inlining_progress(false),
646 _inlining_incrementally(false),
647 _do_cleanup(false),
648 _has_reserved_stack_access(target->has_reserved_stack_access()),
649 #ifndef PRODUCT
650 _igv_idx(0),
651 _trace_opto_output(directive->TraceOptoOutputOption),
652 #endif
653 _has_method_handle_invokes(false),
654 _clinit_barrier_on_entry(false),
655 _has_clinit_barriers(false),
656 _stress_seed(0),
657 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
658 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
659 _env(ci_env),
660 _directive(directive),
661 _log(ci_env->log()),
662 _first_failure_details(nullptr),
663 _intrinsics(comp_arena(), 0, 0, nullptr),
664 _macro_nodes(comp_arena(), 8, 0, nullptr),
665 _parse_predicates(comp_arena(), 8, 0, nullptr),
666 _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
667 _expensive_nodes(comp_arena(), 8, 0, nullptr),
668 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
669 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
670 _unstable_if_traps(comp_arena(), 8, 0, nullptr),
671 _coarsened_locks(comp_arena(), 8, 0, nullptr),
672 _congraph(nullptr),
673 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
674 _unique(0),
675 _dead_node_count(0),
918 _options(Options::for_runtime_stub()),
919 _method(nullptr),
920 _entry_bci(InvocationEntryBci),
921 _stub_function(stub_function),
922 _stub_name(stub_name),
923 _stub_id(stub_id),
924 _stub_entry_point(nullptr),
925 _max_node_limit(MaxNodeLimit),
926 _post_loop_opts_phase(false),
927 _merge_stores_phase(false),
928 _allow_macro_nodes(true),
929 _inlining_progress(false),
930 _inlining_incrementally(false),
931 _has_reserved_stack_access(false),
932 #ifndef PRODUCT
933 _igv_idx(0),
934 _trace_opto_output(directive->TraceOptoOutputOption),
935 #endif
936 _has_method_handle_invokes(false),
937 _clinit_barrier_on_entry(false),
938 _has_clinit_barriers(false),
939 _stress_seed(0),
940 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
941 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
942 _env(ci_env),
943 _directive(directive),
944 _log(ci_env->log()),
945 _first_failure_details(nullptr),
946 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
947 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
948 _congraph(nullptr),
949 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
950 _unique(0),
951 _dead_node_count(0),
952 _dead_node_list(comp_arena()),
953 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
954 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
955 _node_arena(&_node_arena_one),
956 _mach_constant_base_node(nullptr),
957 _Compile_types(mtCompiler, Arena::Tag::tag_type),
958 _initial_gvn(nullptr),
1088 set_do_scheduling(OptoScheduling);
1089
1090 set_do_vector_loop(false);
1091 set_has_monitors(false);
1092 set_has_scoped_access(false);
1093
1094 if (AllowVectorizeOnDemand) {
1095 if (has_method() && _directive->VectorizeOption) {
1096 set_do_vector_loop(true);
1097 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1098 } else if (has_method() && method()->name() != nullptr &&
1099 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1100 set_do_vector_loop(true);
1101 }
1102 }
1103 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
1104 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1105
1106 _max_node_limit = _directive->MaxNodeLimitOption;
1107
1108 if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() &&
1109 (method()->needs_clinit_barrier() || (do_clinit_barriers() && method()->is_static()))) {
1110 set_clinit_barrier_on_entry(true);
1111 if (do_clinit_barriers()) {
1112 set_has_clinit_barriers(true); // Entry clinit barrier is in prolog code.
1113 }
1114 }
1115 if (debug_info()->recording_non_safepoints()) {
1116 set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1117 (comp_arena(), 8, 0, nullptr));
1118 set_default_node_notes(Node_Notes::make(this));
1119 }
1120
1121 const int grow_ats = 16;
1122 _max_alias_types = grow_ats;
1123 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1124 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1125 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1126 {
1127 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1128 }
1129 // Initialize the first few types.
1130 _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1131 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1132 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1133 _num_alias_types = AliasIdxRaw+1;
4121 dead_nodes.push(in);
4122 }
4123 }
4124 m->disconnect_inputs(this);
4125 }
4126 }
4127
4128 set_java_calls(frc.get_java_call_count());
4129 set_inner_loops(frc.get_inner_loop_count());
4130
4131 // No infinite loops, no reason to bail out.
4132 return false;
4133 }
4134
4135 //-----------------------------too_many_traps----------------------------------
4136 // Report if there are too many traps at the current method and bci.
4137 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4138 bool Compile::too_many_traps(ciMethod* method,
4139                              int bci,
4140                              Deoptimization::DeoptReason reason) {
4141   // Report whether traps for 'reason' at method/bci (or globally) have
4142   // exceeded their budget, meaning the compiler should not speculate here.
4143   if (method->has_trap_at(bci)) {
4144     return true;
4145   }
4146   if (PreloadReduceTraps && for_preload()) {
4147     // Preload code should not have traps, if possible.
4148     return true;
4149   }
4150   ciMethodData* md = method->method_data();
4151   if (md->is_empty()) {
4152     // Assume the trap has not occurred, or that it occurred only
4153     // because of a transient condition during start-up in the interpreter.
4154     return false;
4155   }
4156   // For speculative reasons, attribute the trap record to the root method.
4157   ciMethod* m = nullptr;
4158   if (Deoptimization::reason_is_speculate(reason)) {
4159     m = this->method();
4160   }
4161   if (md->has_trap_at(bci, m, reason) == 0) {
4162     // No per-BCI record for this method; see if there have been too many globally.
4163     return too_many_traps(reason, md);
4164   }
4165   // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4166   // Also, if there are multiple reasons, or if there is no per-BCI record,
4167   // assume the worst.
4168   if (log() != nullptr) {
4169     log()->elem("observe trap='%s' count='%d'",
4170                 Deoptimization::trap_reason_name(reason),
4171                 md->trap_count(reason));
4172   }
4173   return true;
4174 }
4169
4170 // Less-accurate variant which does not require a method and bci.
4171 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4172                              ciMethodData* logmd) {
4173   // Less-accurate variant which does not require a method and bci: compare
4174   // the cumulative trap count for 'reason' against the per-method limit.
4175   if (PreloadReduceTraps && for_preload()) {
4176     // Preload code should not have traps, if possible.
4177     return true;
4178   }
4179   if (trap_count(reason) < Deoptimization::per_method_trap_limit(reason)) {
4180     // The coast is clear.
4181     return false;
4182   }
4183   // Too many traps globally.
4184   // Note that we use cumulative trap_count, not just md->trap_count.
4185   if (log()) {
4186     int mcount = -1;
4187     if (logmd != nullptr) {
4188       mcount = (int)logmd->trap_count(reason);
4189     }
4190     log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4191                 Deoptimization::trap_reason_name(reason),
4192                 mcount, trap_count(reason));
4193   }
4194   return true;
4195 }
4192
4193 //--------------------------too_many_recompiles--------------------------------
4194 // Report if there are too many recompiles at the current method and bci.
4195 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4196 // Is not eager to return true, since this will cause the compiler to use
4246 _allowed_reasons = 0;
4247 if (is_method_compilation()) {
4248 for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4249 assert(rs < BitsPerInt, "recode bit map");
4250 if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4251 _allowed_reasons |= nth_bit(rs);
4252 }
4253 }
4254 }
4255 }
4256
4257 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4258   // Only a static method can require its holder class to be initialized on call.
4259   if (!method->is_static()) {
4260     return false;
4261   }
4262   return needs_clinit_barrier(method->holder(), accessing_method);
4263 }
4260
4261 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4262   // Only a static field access can require its holder class to be initialized.
4263   if (!field->is_static()) {
4264     return false;
4265   }
4266   return needs_clinit_barrier(field->holder(), accessing_method);
4267 }
4264
4265 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4266 if (holder->is_initialized() && !do_clinit_barriers()) {
4267 return false;
4268 }
4269 if (holder->is_being_initialized() || do_clinit_barriers()) {
4270 if (accessing_method->holder() == holder) {
4271 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4272 // <init>, or a static method. In all those cases, there was an initialization
4273 // barrier on the holder klass passed.
4274 if (accessing_method->is_static_initializer() ||
4275 accessing_method->is_object_initializer() ||
4276 accessing_method->is_static()) {
4277 return false;
4278 }
4279 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4280 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4281     // In case of <init> or a static method, a barrier on the subclass alone is not enough:
4282 // child class can become fully initialized while its parent class is still being initialized.
4283 if (accessing_method->is_static_initializer()) {
4284 return false;
4285 }
4286 }
4287 ciMethod* root = method(); // the root method of compilation
4288 if (root != accessing_method) {
4289 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4549 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4550   // Convert an int value to long, constrained to 'itype'. If a control input
4551   // is supplied, pin the narrowed value below it with a CastII so the ConvI2L
4552   // cannot float above its range check during loop optimizations. Otherwise,
4553   // the ConvI2L node may be eliminated independently of the range check,
4554   // causing the data path to become TOP while the control path is still
4555   // there (although it's unreachable).
4556   if (ctrl != nullptr) {
4557     auto dependency = carry_dependency ? ConstraintCastNode::StrongDependency
4558                                        : ConstraintCastNode::RegularDependency;
4559     Node* cast = new CastIINode(ctrl, value, itype, dependency, true /* range check dependency */);
4560     value = phase->transform(cast);
4561   }
4562   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4563   return phase->transform(new ConvI2LNode(value, ltype));
4564 }
4562
4563 void Compile::dump_print_inlining() {
4564   // Flush the collected inlining decisions for this compilation to the tty.
4565   auto* printer = inline_printer();
4566   printer->print_on(tty);
4567 }
4566
4567 void Compile::log_late_inline(CallGenerator* cg) {
4568   // Emit a <late_inline> element for 'cg', including its JVM state chain,
4569   // so the LogCompilation tool can reconstruct the inlining context.
4570   if (log() == nullptr) {
4571     return;
4572   }
4573   log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'",
4574               log()->identify(cg->method()), cg->unique_id());
4575   for (JVMState* p = cg->call_node()->jvms(); p != nullptr; p = p->caller()) {
4576     log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4577   }
4578   log()->tail("late_inline");
4579 }
4579
4580 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4581   // Record the attempted late inline first, then the reason it failed.
4582   log_late_inline(cg);
4583   if (log() == nullptr) {
4584     return;
4585   }
4586   log()->inline_fail(msg);
4587 }
4586
4587 void Compile::log_inline_id(CallGenerator* cg) {
4588 if (log() != nullptr) {
4589 // The LogCompilation tool needs a unique way to identify late
|