src/hotspot/share/opto/compile.cpp

 628       _method(target),
 629       _entry_bci(osr_bci),
 630       _ilt(nullptr),
 631       _stub_function(nullptr),
 632       _stub_name(nullptr),
 633       _stub_entry_point(nullptr),
 634       _max_node_limit(MaxNodeLimit),
 635       _post_loop_opts_phase(false),
 636       _merge_stores_phase(false),
 637       _allow_macro_nodes(true),
 638       _inlining_progress(false),
 639       _inlining_incrementally(false),
 640       _do_cleanup(false),
 641       _has_reserved_stack_access(target->has_reserved_stack_access()),
 642 #ifndef PRODUCT
 643       _igv_idx(0),
 644       _trace_opto_output(directive->TraceOptoOutputOption),
 645 #endif
 646       _has_method_handle_invokes(false),
 647       _clinit_barrier_on_entry(false),

 648       _stress_seed(0),
 649       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 650       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 651       _env(ci_env),
 652       _directive(directive),
 653       _log(ci_env->log()),
 654       _first_failure_details(nullptr),
 655       _intrinsics(comp_arena(), 0, 0, nullptr),
 656       _macro_nodes(comp_arena(), 8, 0, nullptr),
 657       _parse_predicates(comp_arena(), 8, 0, nullptr),
 658       _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
 659       _expensive_nodes(comp_arena(), 8, 0, nullptr),
 660       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 661       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 662       _unstable_if_traps(comp_arena(), 8, 0, nullptr),
 663       _coarsened_locks(comp_arena(), 8, 0, nullptr),
 664       _congraph(nullptr),
 665       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 666           _unique(0),
 667       _dead_node_count(0),

 907       _compile_id(0),
 908       _options(Options::for_runtime_stub()),
 909       _method(nullptr),
 910       _entry_bci(InvocationEntryBci),
 911       _stub_function(stub_function),
 912       _stub_name(stub_name),
 913       _stub_entry_point(nullptr),
 914       _max_node_limit(MaxNodeLimit),
 915       _post_loop_opts_phase(false),
 916       _merge_stores_phase(false),
 917       _allow_macro_nodes(true),
 918       _inlining_progress(false),
 919       _inlining_incrementally(false),
 920       _has_reserved_stack_access(false),
 921 #ifndef PRODUCT
 922       _igv_idx(0),
 923       _trace_opto_output(directive->TraceOptoOutputOption),
 924 #endif
 925       _has_method_handle_invokes(false),
 926       _clinit_barrier_on_entry(false),

 927       _stress_seed(0),
 928       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 929       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 930       _env(ci_env),
 931       _directive(directive),
 932       _log(ci_env->log()),
 933       _first_failure_details(nullptr),
 934       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 935       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 936       _congraph(nullptr),
 937       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 938           _unique(0),
 939       _dead_node_count(0),
 940       _dead_node_list(comp_arena()),
 941       _node_arena_one(mtCompiler, Arena::Tag::tag_node),
 942       _node_arena_two(mtCompiler, Arena::Tag::tag_node),
 943       _node_arena(&_node_arena_one),
 944       _mach_constant_base_node(nullptr),
 945       _Compile_types(mtCompiler, Arena::Tag::tag_type),
 946       _initial_gvn(nullptr),

1067   set_do_scheduling(OptoScheduling);
1068 
1069   set_do_vector_loop(false);
1070   set_has_monitors(false);
1071   set_has_scoped_access(false);
1072 
1073   if (AllowVectorizeOnDemand) {
1074     if (has_method() && _directive->VectorizeOption) {
1075       set_do_vector_loop(true);
1076       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1077     } else if (has_method() && method()->name() != nullptr &&
1078                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1079       set_do_vector_loop(true);
1080     }
1081   }
1082   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1083   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1084 
1085   _max_node_limit = _directive->MaxNodeLimitOption;
1086 
1087   if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {

1088     set_clinit_barrier_on_entry(true);



1089   }
1090   if (debug_info()->recording_non_safepoints()) {
1091     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1092                         (comp_arena(), 8, 0, nullptr));
1093     set_default_node_notes(Node_Notes::make(this));
1094   }
1095 
1096   const int grow_ats = 16;
1097   _max_alias_types = grow_ats;
1098   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1099   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1100   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1101   {
1102     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1103   }
1104   // Initialize the first few types.
1105   _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1106   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1107   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1108   _num_alias_types = AliasIdxRaw+1;

4100       frc.get_float_count() > 32 &&
4101       frc.get_double_count() == 0 &&
4102       (10 * frc.get_call_count() < frc.get_float_count()) ) {
4103     set_24_bit_selection_and_mode(false, true);
4104   }
4105 #endif // IA32
4106 
4107   set_java_calls(frc.get_java_call_count());
4108   set_inner_loops(frc.get_inner_loop_count());
4109 
4110   // No infinite loops, no reason to bail out.
4111   return false;
4112 }
4113 
4114 //-----------------------------too_many_traps----------------------------------
4115 // Report if there are too many traps at the current method and bci.
4116 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4117 bool Compile::too_many_traps(ciMethod* method,
4118                              int bci,
4119                              Deoptimization::DeoptReason reason) {







4120   ciMethodData* md = method->method_data();
4121   if (md->is_empty()) {
4122     // Assume the trap has not occurred, or that it occurred only
4123     // because of a transient condition during start-up in the interpreter.
4124     return false;
4125   }
4126   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4127   if (md->has_trap_at(bci, m, reason) != 0) {
4128     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4129     // Also, if there are multiple reasons, or if there is no per-BCI record,
4130     // assume the worst.
4131     if (log())
4132       log()->elem("observe trap='%s' count='%d'",
4133                   Deoptimization::trap_reason_name(reason),
4134                   md->trap_count(reason));
4135     return true;
4136   } else {
4137     // Ignore method/bci and see if there have been too many globally.
4138     return too_many_traps(reason, md);
4139   }
4140 }
4141 
4142 // Less-accurate variant which does not require a method and bci.
4143 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4144                              ciMethodData* logmd) {




4145   if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
4146     // Too many traps globally.
4147     // Note that we use cumulative trap_count, not just md->trap_count.
4148     if (log()) {
4149       int mcount = (logmd == nullptr)? -1: (int)logmd->trap_count(reason);
4150       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4151                   Deoptimization::trap_reason_name(reason),
4152                   mcount, trap_count(reason));
4153     }
4154     return true;
4155   } else {
4156     // The coast is clear.
4157     return false;
4158   }
4159 }
4160 
4161 //--------------------------too_many_recompiles--------------------------------
4162 // Report if there are too many recompiles at the current method and bci.
4163 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4164 // Is not eager to return true, since this will cause the compiler to use

4214   _allowed_reasons = 0;
4215   if (is_method_compilation()) {
4216     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4217       assert(rs < BitsPerInt, "recode bit map");
4218       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4219         _allowed_reasons |= nth_bit(rs);
4220       }
4221     }
4222   }
4223 }
4224 
4225 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4226   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4227 }
4228 
4229 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4230   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4231 }
4232 
4233 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4234   if (holder->is_initialized()) {
4235     return false;
4236   }
4237   if (holder->is_being_initialized()) {
4238     if (accessing_method->holder() == holder) {
4239       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4240       // <init>, or a static method. In all those cases, the initialization barrier
4241       // on the holder klass has already been passed.
4242       if (accessing_method->is_static_initializer() ||
4243           accessing_method->is_object_initializer() ||
4244           accessing_method->is_static()) {
4245         return false;
4246       }
4247     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4248       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4249       // In case of <init> or a static method, the barrier on the subclass is not enough:
4250       // a child class can become fully initialized while its parent class is still being initialized.
4251       if (accessing_method->is_static_initializer()) {
4252         return false;
4253       }
4254     }
4255     ciMethod* root = method(); // the root method of compilation
4256     if (root != accessing_method) {
4257       return needs_clinit_barrier(holder, root); // check access in the context of compilation root

4517 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4518   if (ctrl != nullptr) {
4519     // Express control dependency by a CastII node with a narrow type.
4520     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4521     // node from floating above the range check during loop optimizations. Otherwise, the
4522     // ConvI2L node may be eliminated independently of the range check, causing the data path
4523     // to become TOP while the control path is still there (although it's unreachable).
4524     value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4525     value = phase->transform(value);
4526   }
4527   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4528   return phase->transform(new ConvI2LNode(value, ltype));
4529 }
4530 
4531 void Compile::dump_print_inlining() {
4532   inline_printer()->print_on(tty);
4533 }
4534 
4535 void Compile::log_late_inline(CallGenerator* cg) {
4536   if (log() != nullptr) {
4537     log()->head("late_inline method='%d'  inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4538                 cg->unique_id());
4539     JVMState* p = cg->call_node()->jvms();
4540     while (p != nullptr) {
4541       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4542       p = p->caller();
4543     }
4544     log()->tail("late_inline");
4545   }
4546 }
4547 
4548 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4549   log_late_inline(cg);
4550   if (log() != nullptr) {
4551     log()->inline_fail(msg);
4552   }
4553 }
4554 
4555 void Compile::log_inline_id(CallGenerator* cg) {
4556   if (log() != nullptr) {
4557     // The LogCompilation tool needs a unique way to identify late

 628       _method(target),
 629       _entry_bci(osr_bci),
 630       _ilt(nullptr),
 631       _stub_function(nullptr),
 632       _stub_name(nullptr),
 633       _stub_entry_point(nullptr),
 634       _max_node_limit(MaxNodeLimit),
 635       _post_loop_opts_phase(false),
 636       _merge_stores_phase(false),
 637       _allow_macro_nodes(true),
 638       _inlining_progress(false),
 639       _inlining_incrementally(false),
 640       _do_cleanup(false),
 641       _has_reserved_stack_access(target->has_reserved_stack_access()),
 642 #ifndef PRODUCT
 643       _igv_idx(0),
 644       _trace_opto_output(directive->TraceOptoOutputOption),
 645 #endif
 646       _has_method_handle_invokes(false),
 647       _clinit_barrier_on_entry(false),
 648       _has_clinit_barriers(false),
 649       _stress_seed(0),
 650       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 651       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 652       _env(ci_env),
 653       _directive(directive),
 654       _log(ci_env->log()),
 655       _first_failure_details(nullptr),
 656       _intrinsics(comp_arena(), 0, 0, nullptr),
 657       _macro_nodes(comp_arena(), 8, 0, nullptr),
 658       _parse_predicates(comp_arena(), 8, 0, nullptr),
 659       _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
 660       _expensive_nodes(comp_arena(), 8, 0, nullptr),
 661       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 662       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 663       _unstable_if_traps(comp_arena(), 8, 0, nullptr),
 664       _coarsened_locks(comp_arena(), 8, 0, nullptr),
 665       _congraph(nullptr),
 666       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 667           _unique(0),
 668       _dead_node_count(0),

 908       _compile_id(0),
 909       _options(Options::for_runtime_stub()),
 910       _method(nullptr),
 911       _entry_bci(InvocationEntryBci),
 912       _stub_function(stub_function),
 913       _stub_name(stub_name),
 914       _stub_entry_point(nullptr),
 915       _max_node_limit(MaxNodeLimit),
 916       _post_loop_opts_phase(false),
 917       _merge_stores_phase(false),
 918       _allow_macro_nodes(true),
 919       _inlining_progress(false),
 920       _inlining_incrementally(false),
 921       _has_reserved_stack_access(false),
 922 #ifndef PRODUCT
 923       _igv_idx(0),
 924       _trace_opto_output(directive->TraceOptoOutputOption),
 925 #endif
 926       _has_method_handle_invokes(false),
 927       _clinit_barrier_on_entry(false),
 928       _has_clinit_barriers(false),
 929       _stress_seed(0),
 930       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 931       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 932       _env(ci_env),
 933       _directive(directive),
 934       _log(ci_env->log()),
 935       _first_failure_details(nullptr),
 936       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 937       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 938       _congraph(nullptr),
 939       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 940           _unique(0),
 941       _dead_node_count(0),
 942       _dead_node_list(comp_arena()),
 943       _node_arena_one(mtCompiler, Arena::Tag::tag_node),
 944       _node_arena_two(mtCompiler, Arena::Tag::tag_node),
 945       _node_arena(&_node_arena_one),
 946       _mach_constant_base_node(nullptr),
 947       _Compile_types(mtCompiler, Arena::Tag::tag_type),
 948       _initial_gvn(nullptr),

1069   set_do_scheduling(OptoScheduling);
1070 
1071   set_do_vector_loop(false);
1072   set_has_monitors(false);
1073   set_has_scoped_access(false);
1074 
1075   if (AllowVectorizeOnDemand) {
1076     if (has_method() && _directive->VectorizeOption) {
1077       set_do_vector_loop(true);
1078       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1079     } else if (has_method() && method()->name() != nullptr &&
1080                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1081       set_do_vector_loop(true);
1082     }
1083   }
1084   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1085   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1086 
1087   _max_node_limit = _directive->MaxNodeLimitOption;
1088 
1089   if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() &&
1090       (method()->needs_clinit_barrier() || (do_clinit_barriers() && method()->is_static()))) {
1091     set_clinit_barrier_on_entry(true);
1092     if (do_clinit_barriers()) {
1093       set_has_clinit_barriers(true); // Entry clinit barrier is in prolog code.
1094     }
1095   }
1096   if (debug_info()->recording_non_safepoints()) {
1097     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1098                         (comp_arena(), 8, 0, nullptr));
1099     set_default_node_notes(Node_Notes::make(this));
1100   }
1101 
1102   const int grow_ats = 16;
1103   _max_alias_types = grow_ats;
1104   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1105   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1106   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1107   {
1108     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1109   }
1110   // Initialize the first few types.
1111   _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1112   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1113   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1114   _num_alias_types = AliasIdxRaw+1;
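
The new barrier-on-entry condition above boils down to a small predicate. A minimal standalone sketch, with plain booleans standing in for the VM_Version, directive and method queries (none of the parameter names are HotSpot API, and the reading of do_clinit_barriers() as a preload/AOT mode that forces entry barriers is an assumption based on this change):

    // Sketch: should the compiled method get a class-initialization barrier in its prolog?
    static bool wants_entry_clinit_barrier(bool fast_init_checks,       // supports_fast_class_init_checks()
                                           bool has_method,             // compiling a Java method, not a stub
                                           bool is_osr,                  // on-stack-replacement compilation
                                           bool method_needs_barrier,    // method()->needs_clinit_barrier()
                                           bool do_clinit_barriers,      // preload/AOT mode (assumed)
                                           bool method_is_static) {
      return fast_init_checks && has_method && !is_osr &&
             (method_needs_barrier || (do_clinit_barriers && method_is_static));
    }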

4106       frc.get_float_count() > 32 &&
4107       frc.get_double_count() == 0 &&
4108       (10 * frc.get_call_count() < frc.get_float_count()) ) {
4109     set_24_bit_selection_and_mode(false, true);
4110   }
4111 #endif // IA32
4112 
4113   set_java_calls(frc.get_java_call_count());
4114   set_inner_loops(frc.get_inner_loop_count());
4115 
4116   // No infinite loops, no reason to bail out.
4117   return false;
4118 }
4119 
4120 //-----------------------------too_many_traps----------------------------------
4121 // Report if there are too many traps at the current method and bci.
4122 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4123 bool Compile::too_many_traps(ciMethod* method,
4124                              int bci,
4125                              Deoptimization::DeoptReason reason) {
4126   if (method->has_trap_at(bci)) {
4127     return true;
4128   }
4129   if (PreloadReduceTraps && for_preload()) {
4130     // Preload code should not have traps, if possible.
4131     return true;
4132   }
4133   ciMethodData* md = method->method_data();
4134   if (md->is_empty()) {
4135     // Assume the trap has not occurred, or that it occurred only
4136     // because of a transient condition during start-up in the interpreter.
4137     return false;
4138   }
4139   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4140   if (md->has_trap_at(bci, m, reason) != 0) {
4141     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4142     // Also, if there are multiple reasons, or if there is no per-BCI record,
4143     // assume the worst.
4144     if (log())
4145       log()->elem("observe trap='%s' count='%d'",
4146                   Deoptimization::trap_reason_name(reason),
4147                   md->trap_count(reason));
4148     return true;
4149   } else {
4150     // Ignore method/bci and see if there have been too many globally.
4151     return too_many_traps(reason, md);
4152   }
4153 }
4154 
4155 // Less-accurate variant which does not require a method and bci.
4156 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4157                              ciMethodData* logmd) {
4158   if (PreloadReduceTraps && for_preload()) {
4159     // Preload code should not have traps, if possible.
4160     return true;
4161   }
4162   if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
4163     // Too many traps globally.
4164     // Note that we use cumulative trap_count, not just md->trap_count.
4165     if (log()) {
4166       int mcount = (logmd == nullptr)? -1: (int)logmd->trap_count(reason);
4167       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4168                   Deoptimization::trap_reason_name(reason),
4169                   mcount, trap_count(reason));
4170     }
4171     return true;
4172   } else {
4173     // The coast is clear.
4174     return false;
4175   }
4176 }
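
Taken together, the two overloads above implement a "stop emitting uncommon traps" heuristic, and the patch adds two early exits: a trap already recorded at this bci, and preload code under PreloadReduceTraps. A standalone model of the decision order (simplified and illustrative, not HotSpot code):

    #include <cstdint>

    struct TrapHeuristic {
      bool     trapped_at_this_bci;   // models method->has_trap_at(bci)
      bool     preload_reduce_traps;  // models PreloadReduceTraps && for_preload()
      bool     profile_is_empty;      // models md->is_empty()
      bool     per_bci_record;        // models md->has_trap_at(bci, m, reason)
      uint32_t cumulative_count;      // models trap_count(reason)
      uint32_t per_method_limit;      // models Deoptimization::per_method_trap_limit(reason)

      bool too_many_traps() const {
        if (trapped_at_this_bci)  return true;   // a trap already fired here
        if (preload_reduce_traps) return true;   // preload code should avoid traps if possible
        if (profile_is_empty)     return false;  // assume a transient start-up condition
        if (per_bci_record)       return true;   // assume the worst for this bci
        return cumulative_count >= per_method_limit;  // otherwise consult the cumulative count
      }
    };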
4177 
4178 //--------------------------too_many_recompiles--------------------------------
4179 // Report if there are too many recompiles at the current method and bci.
4180 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4181 // Is not eager to return true, since this will cause the compiler to use

4231   _allowed_reasons = 0;
4232   if (is_method_compilation()) {
4233     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4234       assert(rs < BitsPerInt, "recode bit map");
4235       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4236         _allowed_reasons |= nth_bit(rs);
4237       }
4238     }
4239   }
4240 }
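
The loop above caches, once per compilation, which deoptimization reasons may still use uncommon traps. A standalone sketch of the bitmask construction (illustrative names, simplified types):

    #include <cassert>
    #include <climits>

    // One bit per DeoptReason; a set bit means "this reason has not trapped too often,
    // so uncommon traps for it are still allowed in this compilation".
    static int build_allowed_reasons(int trap_hist_length, bool (*too_many)(int reason)) {
      int allowed = 0;
      for (int rs = 1; rs < trap_hist_length; rs++) {           // reason 0 is Reason_none
        assert(rs < (int)(sizeof(int) * CHAR_BIT) && "recode bit map");
        if (!too_many(rs)) {
          allowed |= (1 << rs);                                  // models nth_bit(rs)
        }
      }
      return allowed;
    }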
4241 
4242 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4243   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4244 }
4245 
4246 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4247   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4248 }
4249 
4250 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4251   if (holder->is_initialized() && !do_clinit_barriers()) {
4252     return false;
4253   }
4254   if (holder->is_being_initialized() || do_clinit_barriers()) {
4255     if (accessing_method->holder() == holder) {
4256       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4257       // <init>, or a static method. In all those cases, the initialization barrier
4258       // on the holder klass has already been passed.
4259       if (accessing_method->is_static_initializer() ||
4260           accessing_method->is_object_initializer() ||
4261           accessing_method->is_static()) {
4262         return false;
4263       }
4264     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4265       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4266       // In case of <init> or a static method, the barrier on the subclass is not enough:
4267       // a child class can become fully initialized while its parent class is still being initialized.
4268       if (accessing_method->is_static_initializer()) {
4269         return false;
4270       }
4271     }
4272     ciMethod* root = method(); // the root method of compilation
4273     if (root != accessing_method) {
4274       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
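
The comments above spell out when the per-access barrier can be elided while the holder is still being initialized (or, with this change, whenever do_clinit_barriers() is on). A compact standalone restatement of just that branch (sketch only; the enum and function names are illustrative, not HotSpot API):

    enum class AccessorKind { StaticInitializer, ObjectInitializer, StaticMethod, Other };

    // May the clinit barrier for 'holder' be skipped at this access site?
    static bool barrier_elidable(bool accessor_in_holder,    // accessing_method->holder() == holder
                                 bool accessor_in_subclass,  // accessor's holder is a subclass of holder
                                 AccessorKind kind) {
      if (accessor_in_holder) {
        // <clinit>, <init> and static methods of the holder have already passed (or are
        // running under) the holder's initialization barrier.
        return kind == AccessorKind::StaticInitializer ||
               kind == AccessorKind::ObjectInitializer ||
               kind == AccessorKind::StaticMethod;
      }
      if (accessor_in_subclass) {
        // Only the subclass <clinit> is enough: a child class can become fully initialized
        // while its parent is still being initialized.
        return kind == AccessorKind::StaticInitializer;
      }
      return false;  // unrelated accessor: keep the barrier (or defer to the compilation root)
    }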

4534 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4535   if (ctrl != nullptr) {
4536     // Express control dependency by a CastII node with a narrow type.
4537     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4538     // node from floating above the range check during loop optimizations. Otherwise, the
4539     // ConvI2L node may be eliminated independently of the range check, causing the data path
4540     // to become TOP while the control path is still there (although it's unreachable).
4541     value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4542     value = phase->transform(value);
4543   }
4544   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4545   return phase->transform(new ConvI2LNode(value, ltype));
4546 }
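
When a control input is supplied, the helper above therefore builds a two-node chain in which the narrowed CastII is pinned to the range-check control. A sketch of the resulting shape and a hypothetical call site (names are illustrative, not taken from this file):

    //            ctrl (range-check control)
    //              |
    //   value --> CastII[itype, range-check dependency] --> ConvI2L[matching long range]
    //
    // The CastII keeps the narrowed value below the range check, so the ConvI2L cannot be
    // hoisted above it and constant-fold to TOP while the dead control path still exists.
    //
    // Hypothetical use: converting a range-checked array index to long.
    //   Node* long_index = Compile::constrained_convI2L(&_gvn, index, index_type,
    //                                                   range_check_ctrl, false);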
4547 
4548 void Compile::dump_print_inlining() {
4549   inline_printer()->print_on(tty);
4550 }
4551 
4552 void Compile::log_late_inline(CallGenerator* cg) {
4553   if (log() != nullptr) {
4554     log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4555                 cg->unique_id());
4556     JVMState* p = cg->call_node()->jvms();
4557     while (p != nullptr) {
4558       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4559       p = p->caller();
4560     }
4561     log()->tail("late_inline");
4562   }
4563 }
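
Given the head/elem/tail calls above, a late_inline event nests one jvms element per caller frame, innermost first. Roughly (the ids and bcis below are illustrative, not real output):

    <late_inline method='123' inline_id='42'>
      <jvms bci='7' method='123'/>
      <jvms bci='19' method='88'/>
    </late_inline>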
4564 
4565 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4566   log_late_inline(cg);
4567   if (log() != nullptr) {
4568     log()->inline_fail(msg);
4569   }
4570 }
4571 
4572 void Compile::log_inline_id(CallGenerator* cg) {
4573   if (log() != nullptr) {
4574     // The LogCompilation tool needs a unique way to identify late