626 _method(target),
627 _entry_bci(osr_bci),
628 _ilt(nullptr),
629 _stub_function(nullptr),
630 _stub_name(nullptr),
631 _stub_entry_point(nullptr),
632 _max_node_limit(MaxNodeLimit),
633 _post_loop_opts_phase(false),
634 _merge_stores_phase(false),
635 _allow_macro_nodes(true),
636 _inlining_progress(false),
637 _inlining_incrementally(false),
638 _do_cleanup(false),
639 _has_reserved_stack_access(target->has_reserved_stack_access()),
640 #ifndef PRODUCT
641 _igv_idx(0),
642 _trace_opto_output(directive->TraceOptoOutputOption),
643 #endif
644 _has_method_handle_invokes(false),
645 _clinit_barrier_on_entry(false),
646 _stress_seed(0),
647 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
648 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
649 _env(ci_env),
650 _directive(directive),
651 _log(ci_env->log()),
652 _first_failure_details(nullptr),
653 _intrinsics(comp_arena(), 0, 0, nullptr),
654 _macro_nodes(comp_arena(), 8, 0, nullptr),
655 _parse_predicates(comp_arena(), 8, 0, nullptr),
656 _template_assertion_predicate_opaqs(comp_arena(), 8, 0, nullptr),
657 _expensive_nodes(comp_arena(), 8, 0, nullptr),
658 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
659 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
660 _unstable_if_traps(comp_arena(), 8, 0, nullptr),
661 _coarsened_locks(comp_arena(), 8, 0, nullptr),
662 _congraph(nullptr),
663 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
664 _unique(0),
665 _dead_node_count(0),
905 _compile_id(0),
906 _options(Options::for_runtime_stub()),
907 _method(nullptr),
908 _entry_bci(InvocationEntryBci),
909 _stub_function(stub_function),
910 _stub_name(stub_name),
911 _stub_entry_point(nullptr),
912 _max_node_limit(MaxNodeLimit),
913 _post_loop_opts_phase(false),
914 _merge_stores_phase(false),
915 _allow_macro_nodes(true),
916 _inlining_progress(false),
917 _inlining_incrementally(false),
918 _has_reserved_stack_access(false),
919 #ifndef PRODUCT
920 _igv_idx(0),
921 _trace_opto_output(directive->TraceOptoOutputOption),
922 #endif
923 _has_method_handle_invokes(false),
924 _clinit_barrier_on_entry(false),
925 _stress_seed(0),
926 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
927 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
928 _env(ci_env),
929 _directive(directive),
930 _log(ci_env->log()),
931 _first_failure_details(nullptr),
932 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
933 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
934 _congraph(nullptr),
935 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
936 _unique(0),
937 _dead_node_count(0),
938 _dead_node_list(comp_arena()),
939 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
940 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
941 _node_arena(&_node_arena_one),
942 _mach_constant_base_node(nullptr),
943 _Compile_types(mtCompiler, Arena::Tag::tag_type),
944 _initial_gvn(nullptr),
1065 set_do_scheduling(OptoScheduling);
1066
1067 set_do_vector_loop(false);
1068 set_has_monitors(false);
1069 set_has_scoped_access(false);
1070
1071 if (AllowVectorizeOnDemand) {
1072 if (has_method() && _directive->VectorizeOption) {
1073 set_do_vector_loop(true);
1074 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1075 } else if (has_method() && method()->name() != nullptr &&
1076 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1077 set_do_vector_loop(true);
1078 }
1079 }
1080 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
1081 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1082
1083 _max_node_limit = _directive->MaxNodeLimitOption;
1084
1085 if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {
1086 set_clinit_barrier_on_entry(true);
1087 }
1088 if (debug_info()->recording_non_safepoints()) {
1089 set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1090 (comp_arena(), 8, 0, nullptr));
1091 set_default_node_notes(Node_Notes::make(this));
1092 }
1093
1094 const int grow_ats = 16;
1095 _max_alias_types = grow_ats;
1096 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1097 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1098 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1099 {
1100 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1101 }
1102 // Initialize the first few types.
1103 _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1104 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1105 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1106 _num_alias_types = AliasIdxRaw+1;
4088 frc.get_float_count() > 32 &&
4089 frc.get_double_count() == 0 &&
4090 (10 * frc.get_call_count() < frc.get_float_count()) ) {
4091 set_24_bit_selection_and_mode(false, true);
4092 }
4093 #endif // IA32
4094
4095 set_java_calls(frc.get_java_call_count());
4096 set_inner_loops(frc.get_inner_loop_count());
4097
4098 // No infinite loops, no reason to bail out.
4099 return false;
4100 }
4101
4102 //-----------------------------too_many_traps----------------------------------
4103 // Report if there are too many traps at the current method and bci.
4104 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4105 bool Compile::too_many_traps(ciMethod* method,
4106 int bci,
4107 Deoptimization::DeoptReason reason) {
4108 ciMethodData* md = method->method_data();
4109 if (md->is_empty()) {
4110 // Assume the trap has not occurred, or that it occurred only
4111 // because of a transient condition during start-up in the interpreter.
4112 return false;
4113 }
4114 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4115 if (md->has_trap_at(bci, m, reason) != 0) {
4116 // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4117 // Also, if there are multiple reasons, or if there is no per-BCI record,
4118 // assume the worst.
4119 if (log())
4120 log()->elem("observe trap='%s' count='%d'",
4121 Deoptimization::trap_reason_name(reason),
4122 md->trap_count(reason));
4123 return true;
4124 } else {
4125 // Ignore method/bci and see if there have been too many globally.
4126 return too_many_traps(reason, md);
4127 }
4202 _allowed_reasons = 0;
4203 if (is_method_compilation()) {
4204 for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4205 assert(rs < BitsPerInt, "recode bit map");
4206 if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4207 _allowed_reasons |= nth_bit(rs);
4208 }
4209 }
4210 }
4211 }
4212
4213 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4214 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4215 }
4216
4217 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4218 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4219 }
4220
4221 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4222 if (holder->is_initialized()) {
4223 return false;
4224 }
4225 if (holder->is_being_initialized()) {
4226 if (accessing_method->holder() == holder) {
4227 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4228 // <init>, or a static method. In all those cases, there was an initialization
4229 // barrier on the holder klass passed.
4230 if (accessing_method->is_static_initializer() ||
4231 accessing_method->is_object_initializer() ||
4232 accessing_method->is_static()) {
4233 return false;
4234 }
4235 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4236 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
// In case of <init> or a static method, a barrier on the subclass is not enough:
4238 // child class can become fully initialized while its parent class is still being initialized.
4239 if (accessing_method->is_static_initializer()) {
4240 return false;
4241 }
4242 }
4243 ciMethod* root = method(); // the root method of compilation
4244 if (root != accessing_method) {
4245 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4491 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4492 if (ctrl != nullptr) {
4493 // Express control dependency by a CastII node with a narrow type.
4494 // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4495 // node from floating above the range check during loop optimizations. Otherwise, the
4496 // ConvI2L node may be eliminated independently of the range check, causing the data path
4497 // to become TOP while the control path is still there (although it's unreachable).
4498 value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4499 value = phase->transform(value);
4500 }
4501 const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4502 return phase->transform(new ConvI2LNode(value, ltype));
4503 }
4504
4505 void Compile::dump_print_inlining() {
4506 inline_printer()->print_on(tty);
4507 }
4508
4509 void Compile::log_late_inline(CallGenerator* cg) {
4510 if (log() != nullptr) {
4511 log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4512 cg->unique_id());
4513 JVMState* p = cg->call_node()->jvms();
4514 while (p != nullptr) {
4515 log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4516 p = p->caller();
4517 }
4518 log()->tail("late_inline");
4519 }
4520 }
4521
4522 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4523 log_late_inline(cg);
4524 if (log() != nullptr) {
4525 log()->inline_fail(msg);
4526 }
4527 }
4528
4529 void Compile::log_inline_id(CallGenerator* cg) {
4530 if (log() != nullptr) {
4531 // The LogCompilation tool needs a unique way to identify late
|
626 _method(target),
627 _entry_bci(osr_bci),
628 _ilt(nullptr),
629 _stub_function(nullptr),
630 _stub_name(nullptr),
631 _stub_entry_point(nullptr),
632 _max_node_limit(MaxNodeLimit),
633 _post_loop_opts_phase(false),
634 _merge_stores_phase(false),
635 _allow_macro_nodes(true),
636 _inlining_progress(false),
637 _inlining_incrementally(false),
638 _do_cleanup(false),
639 _has_reserved_stack_access(target->has_reserved_stack_access()),
640 #ifndef PRODUCT
641 _igv_idx(0),
642 _trace_opto_output(directive->TraceOptoOutputOption),
643 #endif
644 _has_method_handle_invokes(false),
645 _clinit_barrier_on_entry(false),
646 _has_clinit_barriers(false),
647 _stress_seed(0),
648 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
649 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
650 _env(ci_env),
651 _directive(directive),
652 _log(ci_env->log()),
653 _first_failure_details(nullptr),
654 _intrinsics(comp_arena(), 0, 0, nullptr),
655 _macro_nodes(comp_arena(), 8, 0, nullptr),
656 _parse_predicates(comp_arena(), 8, 0, nullptr),
657 _template_assertion_predicate_opaqs(comp_arena(), 8, 0, nullptr),
658 _expensive_nodes(comp_arena(), 8, 0, nullptr),
659 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
660 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
661 _unstable_if_traps(comp_arena(), 8, 0, nullptr),
662 _coarsened_locks(comp_arena(), 8, 0, nullptr),
663 _congraph(nullptr),
664 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
665 _unique(0),
666 _dead_node_count(0),
906 _compile_id(0),
907 _options(Options::for_runtime_stub()),
908 _method(nullptr),
909 _entry_bci(InvocationEntryBci),
910 _stub_function(stub_function),
911 _stub_name(stub_name),
912 _stub_entry_point(nullptr),
913 _max_node_limit(MaxNodeLimit),
914 _post_loop_opts_phase(false),
915 _merge_stores_phase(false),
916 _allow_macro_nodes(true),
917 _inlining_progress(false),
918 _inlining_incrementally(false),
919 _has_reserved_stack_access(false),
920 #ifndef PRODUCT
921 _igv_idx(0),
922 _trace_opto_output(directive->TraceOptoOutputOption),
923 #endif
924 _has_method_handle_invokes(false),
925 _clinit_barrier_on_entry(false),
926 _has_clinit_barriers(false),
927 _stress_seed(0),
928 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
929 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
930 _env(ci_env),
931 _directive(directive),
932 _log(ci_env->log()),
933 _first_failure_details(nullptr),
934 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
935 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
936 _congraph(nullptr),
937 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
938 _unique(0),
939 _dead_node_count(0),
940 _dead_node_list(comp_arena()),
941 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
942 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
943 _node_arena(&_node_arena_one),
944 _mach_constant_base_node(nullptr),
945 _Compile_types(mtCompiler, Arena::Tag::tag_type),
946 _initial_gvn(nullptr),
1067 set_do_scheduling(OptoScheduling);
1068
1069 set_do_vector_loop(false);
1070 set_has_monitors(false);
1071 set_has_scoped_access(false);
1072
1073 if (AllowVectorizeOnDemand) {
1074 if (has_method() && _directive->VectorizeOption) {
1075 set_do_vector_loop(true);
1076 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1077 } else if (has_method() && method()->name() != nullptr &&
1078 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1079 set_do_vector_loop(true);
1080 }
1081 }
1082 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
1083 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1084
1085 _max_node_limit = _directive->MaxNodeLimitOption;
1086
1087 if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() &&
1088 (method()->needs_clinit_barrier() || (do_clinit_barriers() && method()->is_static()))) {
1089 set_clinit_barrier_on_entry(true);
1090 if (do_clinit_barriers()) {
1091 set_has_clinit_barriers(true); // Entry clinit barrier is in prolog code.
1092 }
1093 }
1094 if (debug_info()->recording_non_safepoints()) {
1095 set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1096 (comp_arena(), 8, 0, nullptr));
1097 set_default_node_notes(Node_Notes::make(this));
1098 }
1099
1100 const int grow_ats = 16;
1101 _max_alias_types = grow_ats;
1102 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1103 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1104 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1105 {
1106 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1107 }
1108 // Initialize the first few types.
1109 _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1110 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1111 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1112 _num_alias_types = AliasIdxRaw+1;
4094 frc.get_float_count() > 32 &&
4095 frc.get_double_count() == 0 &&
4096 (10 * frc.get_call_count() < frc.get_float_count()) ) {
4097 set_24_bit_selection_and_mode(false, true);
4098 }
4099 #endif // IA32
4100
4101 set_java_calls(frc.get_java_call_count());
4102 set_inner_loops(frc.get_inner_loop_count());
4103
4104 // No infinite loops, no reason to bail out.
4105 return false;
4106 }
4107
4108 //-----------------------------too_many_traps----------------------------------
4109 // Report if there are too many traps at the current method and bci.
4110 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4111 bool Compile::too_many_traps(ciMethod* method,
4112 int bci,
4113 Deoptimization::DeoptReason reason) {
4114 if (method->has_trap_at(bci)) {
4115 return true;
4116 }
4117 ciMethodData* md = method->method_data();
4118 if (md->is_empty()) {
4119 // Assume the trap has not occurred, or that it occurred only
4120 // because of a transient condition during start-up in the interpreter.
4121 return false;
4122 }
4123 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4124 if (md->has_trap_at(bci, m, reason) != 0) {
4125 // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4126 // Also, if there are multiple reasons, or if there is no per-BCI record,
4127 // assume the worst.
4128 if (log())
4129 log()->elem("observe trap='%s' count='%d'",
4130 Deoptimization::trap_reason_name(reason),
4131 md->trap_count(reason));
4132 return true;
4133 } else {
4134 // Ignore method/bci and see if there have been too many globally.
4135 return too_many_traps(reason, md);
4136 }
4211 _allowed_reasons = 0;
4212 if (is_method_compilation()) {
4213 for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4214 assert(rs < BitsPerInt, "recode bit map");
4215 if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4216 _allowed_reasons |= nth_bit(rs);
4217 }
4218 }
4219 }
4220 }
4221
4222 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4223 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4224 }
4225
4226 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4227 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4228 }
4229
4230 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4231 if (holder->is_initialized() && !do_clinit_barriers()) {
4232 return false;
4233 }
4234 if (holder->is_being_initialized() || do_clinit_barriers()) {
4235 if (accessing_method->holder() == holder) {
4236 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4237 // <init>, or a static method. In all those cases, there was an initialization
4238 // barrier on the holder klass passed.
4239 if (accessing_method->is_static_initializer() ||
4240 accessing_method->is_object_initializer() ||
4241 accessing_method->is_static()) {
4242 return false;
4243 }
4244 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4245 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
// In case of <init> or a static method, a barrier on the subclass is not enough:
4247 // child class can become fully initialized while its parent class is still being initialized.
4248 if (accessing_method->is_static_initializer()) {
4249 return false;
4250 }
4251 }
4252 ciMethod* root = method(); // the root method of compilation
4253 if (root != accessing_method) {
4254 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4500 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4501 if (ctrl != nullptr) {
4502 // Express control dependency by a CastII node with a narrow type.
4503 // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4504 // node from floating above the range check during loop optimizations. Otherwise, the
4505 // ConvI2L node may be eliminated independently of the range check, causing the data path
4506 // to become TOP while the control path is still there (although it's unreachable).
4507 value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4508 value = phase->transform(value);
4509 }
4510 const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4511 return phase->transform(new ConvI2LNode(value, ltype));
4512 }
4513
4514 void Compile::dump_print_inlining() {
4515 inline_printer()->print_on(tty);
4516 }
4517
4518 void Compile::log_late_inline(CallGenerator* cg) {
4519 if (log() != nullptr) {
4520 log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4521 cg->unique_id());
4522 JVMState* p = cg->call_node()->jvms();
4523 while (p != nullptr) {
4524 log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4525 p = p->caller();
4526 }
4527 log()->tail("late_inline");
4528 }
4529 }
4530
4531 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4532 log_late_inline(cg);
4533 if (log() != nullptr) {
4534 log()->inline_fail(msg);
4535 }
4536 }
4537
4538 void Compile::log_inline_id(CallGenerator* cg) {
4539 if (log() != nullptr) {
4540 // The LogCompilation tool needs a unique way to identify late
|