
src/hotspot/share/opto/compile.cpp


 617       _options(options),
 618       _method(target),
 619       _entry_bci(osr_bci),
 620       _ilt(nullptr),
 621       _stub_function(nullptr),
 622       _stub_name(nullptr),
 623       _stub_entry_point(nullptr),
 624       _max_node_limit(MaxNodeLimit),
 625       _post_loop_opts_phase(false),
 626       _allow_macro_nodes(true),
 627       _inlining_progress(false),
 628       _inlining_incrementally(false),
 629       _do_cleanup(false),
 630       _has_reserved_stack_access(target->has_reserved_stack_access()),
 631 #ifndef PRODUCT
 632       _igv_idx(0),
 633       _trace_opto_output(directive->TraceOptoOutputOption),
 634 #endif
 635       _has_method_handle_invokes(false),
 636       _clinit_barrier_on_entry(false),

 637       _stress_seed(0),
 638       _comp_arena(mtCompiler),
 639       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 640       _env(ci_env),
 641       _directive(directive),
 642       _log(ci_env->log()),
 643       _first_failure_details(nullptr),
 644       _intrinsics(comp_arena(), 0, 0, nullptr),
 645       _macro_nodes(comp_arena(), 8, 0, nullptr),
 646       _parse_predicates(comp_arena(), 8, 0, nullptr),
 647       _template_assertion_predicate_opaqs(comp_arena(), 8, 0, nullptr),
 648       _expensive_nodes(comp_arena(), 8, 0, nullptr),
 649       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 650       _unstable_if_traps(comp_arena(), 8, 0, nullptr),
 651       _coarsened_locks(comp_arena(), 8, 0, nullptr),
 652       _congraph(nullptr),
 653       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 654           _unique(0),
 655       _dead_node_count(0),
 656       _dead_node_list(comp_arena()),

 894     : Phase(Compiler),
 895       _compile_id(0),
 896       _options(Options::for_runtime_stub()),
 897       _method(nullptr),
 898       _entry_bci(InvocationEntryBci),
 899       _stub_function(stub_function),
 900       _stub_name(stub_name),
 901       _stub_entry_point(nullptr),
 902       _max_node_limit(MaxNodeLimit),
 903       _post_loop_opts_phase(false),
 904       _allow_macro_nodes(true),
 905       _inlining_progress(false),
 906       _inlining_incrementally(false),
 907       _has_reserved_stack_access(false),
 908 #ifndef PRODUCT
 909       _igv_idx(0),
 910       _trace_opto_output(directive->TraceOptoOutputOption),
 911 #endif
 912       _has_method_handle_invokes(false),
 913       _clinit_barrier_on_entry(false),

 914       _stress_seed(0),
 915       _comp_arena(mtCompiler),
 916       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 917       _env(ci_env),
 918       _directive(directive),
 919       _log(ci_env->log()),
 920       _first_failure_details(nullptr),
 921       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 922       _congraph(nullptr),
 923       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 924           _unique(0),
 925       _dead_node_count(0),
 926       _dead_node_list(comp_arena()),
 927       _node_arena_one(mtCompiler),
 928       _node_arena_two(mtCompiler),
 929       _node_arena(&_node_arena_one),
 930       _mach_constant_base_node(nullptr),
 931       _Compile_types(mtCompiler),
 932       _initial_gvn(nullptr),
 933       _igvn_worklist(nullptr),

1053   set_do_scheduling(OptoScheduling);
1054 
1055   set_do_vector_loop(false);
1056   set_has_monitors(false);
1057   set_has_scoped_access(false);
1058 
1059   if (AllowVectorizeOnDemand) {
1060     if (has_method() && _directive->VectorizeOption) {
1061       set_do_vector_loop(true);
1062       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1063     } else if (has_method() && method()->name() != nullptr &&
1064                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1065       set_do_vector_loop(true);
1066     }
1067   }
1068   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1069   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1070 
1071   _max_node_limit = _directive->MaxNodeLimitOption;
1072 
1073   if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {

1074     set_clinit_barrier_on_entry(true);



1075   }
1076   if (debug_info()->recording_non_safepoints()) {
1077     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1078                         (comp_arena(), 8, 0, nullptr));
1079     set_default_node_notes(Node_Notes::make(this));
1080   }
1081 
1082   const int grow_ats = 16;
1083   _max_alias_types = grow_ats;
1084   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1085   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1086   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1087   {
1088     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1089   }
1090   // Initialize the first few types.
1091   _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1092   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1093   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1094   _num_alias_types = AliasIdxRaw+1;

4024       frc.get_float_count() > 32 &&
4025       frc.get_double_count() == 0 &&
4026       (10 * frc.get_call_count() < frc.get_float_count()) ) {
4027     set_24_bit_selection_and_mode(false, true);
4028   }
4029 #endif // IA32
4030 
4031   set_java_calls(frc.get_java_call_count());
4032   set_inner_loops(frc.get_inner_loop_count());
4033 
4034   // No infinite loops, no reason to bail out.
4035   return false;
4036 }
4037 
4038 //-----------------------------too_many_traps----------------------------------
4039 // Report if there are too many traps at the current method and bci.
4040 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4041 bool Compile::too_many_traps(ciMethod* method,
4042                              int bci,
4043                              Deoptimization::DeoptReason reason) {



4044   ciMethodData* md = method->method_data();
4045   if (md->is_empty()) {
4046     // Assume the trap has not occurred, or that it occurred only
4047     // because of a transient condition during start-up in the interpreter.
4048     return false;
4049   }
4050   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4051   if (md->has_trap_at(bci, m, reason) != 0) {
4052     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4053     // Also, if there are multiple reasons, or if there is no per-BCI record,
4054     // assume the worst.
4055     if (log())
4056       log()->elem("observe trap='%s' count='%d'",
4057                   Deoptimization::trap_reason_name(reason),
4058                   md->trap_count(reason));
4059     return true;
4060   } else {
4061     // Ignore method/bci and see if there have been too many globally.
4062     return too_many_traps(reason, md);
4063   }

4138   _allowed_reasons = 0;
4139   if (is_method_compilation()) {
4140     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4141       assert(rs < BitsPerInt, "recode bit map");
4142       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4143         _allowed_reasons |= nth_bit(rs);
4144       }
4145     }
4146   }
4147 }
4148 
4149 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4150   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4151 }
4152 
4153 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4154   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4155 }
4156 
4157 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4158   if (holder->is_initialized()) {
4159     return false;
4160   }
4161   if (holder->is_being_initialized()) {
4162     if (accessing_method->holder() == holder) {
4163       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4164       // <init>, or a static method. In all those cases, an initialization barrier
4165       // on the holder klass has already been passed.
4166       if (accessing_method->is_static_initializer() ||
4167           accessing_method->is_object_initializer() ||
4168           accessing_method->is_static()) {
4169         return false;
4170       }
4171     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4172       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4173       // In case of <init> or a static method, a barrier on the subclass is not enough:
4174       // a child class can become fully initialized while its parent class is still being initialized.
4175       if (accessing_method->is_static_initializer()) {
4176         return false;
4177       }
4178     }
4179     ciMethod* root = method(); // the root method of compilation
4180     if (root != accessing_method) {
4181       return needs_clinit_barrier(holder, root); // check access in the context of compilation root

4415 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4416   if (ctrl != nullptr) {
4417     // Express control dependency by a CastII node with a narrow type.
4418     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4419     // node from floating above the range check during loop optimizations. Otherwise, the
4420     // ConvI2L node may be eliminated independently of the range check, causing the data path
4421     // to become TOP while the control path is still there (although it's unreachable).
4422     value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4423     value = phase->transform(value);
4424   }
4425   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4426   return phase->transform(new ConvI2LNode(value, ltype));
4427 }
4428 
4429 void Compile::dump_print_inlining() {
4430   inline_printer()->print_on(tty);
4431 }
4432 
4433 void Compile::log_late_inline(CallGenerator* cg) {
4434   if (log() != nullptr) {
4435     log()->head("late_inline method='%d'  inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4436                 cg->unique_id());
4437     JVMState* p = cg->call_node()->jvms();
4438     while (p != nullptr) {
4439       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4440       p = p->caller();
4441     }
4442     log()->tail("late_inline");
4443   }
4444 }
4445 
4446 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4447   log_late_inline(cg);
4448   if (log() != nullptr) {
4449     log()->inline_fail(msg);
4450   }
4451 }
4452 
4453 void Compile::log_inline_id(CallGenerator* cg) {
4454   if (log() != nullptr) {
4455     // The LogCompilation tool needs a unique way to identify late

 617       _options(options),
 618       _method(target),
 619       _entry_bci(osr_bci),
 620       _ilt(nullptr),
 621       _stub_function(nullptr),
 622       _stub_name(nullptr),
 623       _stub_entry_point(nullptr),
 624       _max_node_limit(MaxNodeLimit),
 625       _post_loop_opts_phase(false),
 626       _allow_macro_nodes(true),
 627       _inlining_progress(false),
 628       _inlining_incrementally(false),
 629       _do_cleanup(false),
 630       _has_reserved_stack_access(target->has_reserved_stack_access()),
 631 #ifndef PRODUCT
 632       _igv_idx(0),
 633       _trace_opto_output(directive->TraceOptoOutputOption),
 634 #endif
 635       _has_method_handle_invokes(false),
 636       _clinit_barrier_on_entry(false),
 637       _has_clinit_barriers(false),
 638       _stress_seed(0),
 639       _comp_arena(mtCompiler),
 640       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 641       _env(ci_env),
 642       _directive(directive),
 643       _log(ci_env->log()),
 644       _first_failure_details(nullptr),
 645       _intrinsics(comp_arena(), 0, 0, nullptr),
 646       _macro_nodes(comp_arena(), 8, 0, nullptr),
 647       _parse_predicates(comp_arena(), 8, 0, nullptr),
 648       _template_assertion_predicate_opaqs(comp_arena(), 8, 0, nullptr),
 649       _expensive_nodes(comp_arena(), 8, 0, nullptr),
 650       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 651       _unstable_if_traps(comp_arena(), 8, 0, nullptr),
 652       _coarsened_locks(comp_arena(), 8, 0, nullptr),
 653       _congraph(nullptr),
 654       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 655           _unique(0),
 656       _dead_node_count(0),
 657       _dead_node_list(comp_arena()),

 895     : Phase(Compiler),
 896       _compile_id(0),
 897       _options(Options::for_runtime_stub()),
 898       _method(nullptr),
 899       _entry_bci(InvocationEntryBci),
 900       _stub_function(stub_function),
 901       _stub_name(stub_name),
 902       _stub_entry_point(nullptr),
 903       _max_node_limit(MaxNodeLimit),
 904       _post_loop_opts_phase(false),
 905       _allow_macro_nodes(true),
 906       _inlining_progress(false),
 907       _inlining_incrementally(false),
 908       _has_reserved_stack_access(false),
 909 #ifndef PRODUCT
 910       _igv_idx(0),
 911       _trace_opto_output(directive->TraceOptoOutputOption),
 912 #endif
 913       _has_method_handle_invokes(false),
 914       _clinit_barrier_on_entry(false),
 915       _has_clinit_barriers(false),
 916       _stress_seed(0),
 917       _comp_arena(mtCompiler),
 918       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 919       _env(ci_env),
 920       _directive(directive),
 921       _log(ci_env->log()),
 922       _first_failure_details(nullptr),
 923       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 924       _congraph(nullptr),
 925       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 926           _unique(0),
 927       _dead_node_count(0),
 928       _dead_node_list(comp_arena()),
 929       _node_arena_one(mtCompiler),
 930       _node_arena_two(mtCompiler),
 931       _node_arena(&_node_arena_one),
 932       _mach_constant_base_node(nullptr),
 933       _Compile_types(mtCompiler),
 934       _initial_gvn(nullptr),
 935       _igvn_worklist(nullptr),

1055   set_do_scheduling(OptoScheduling);
1056 
1057   set_do_vector_loop(false);
1058   set_has_monitors(false);
1059   set_has_scoped_access(false);
1060 
1061   if (AllowVectorizeOnDemand) {
1062     if (has_method() && _directive->VectorizeOption) {
1063       set_do_vector_loop(true);
1064       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1065     } else if (has_method() && method()->name() != nullptr &&
1066                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1067       set_do_vector_loop(true);
1068     }
1069   }
1070   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1071   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1072 
1073   _max_node_limit = _directive->MaxNodeLimitOption;
1074 
1075   if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() &&
1076       (method()->needs_clinit_barrier() || (do_clinit_barriers() && method()->is_static()))) {
1077     set_clinit_barrier_on_entry(true);
1078     if (do_clinit_barriers()) {
1079       set_has_clinit_barriers(true); // Entry clinit barrier is in prolog code.
1080     }
1081   }
1082   if (debug_info()->recording_non_safepoints()) {
1083     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1084                         (comp_arena(), 8, 0, nullptr));
1085     set_default_node_notes(Node_Notes::make(this));
1086   }
1087 
1088   const int grow_ats = 16;
1089   _max_alias_types = grow_ats;
1090   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1091   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1092   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1093   {
1094     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1095   }
1096   // Initialize the first few types.
1097   _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1098   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1099   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1100   _num_alias_types = AliasIdxRaw+1;

4030       frc.get_float_count() > 32 &&
4031       frc.get_double_count() == 0 &&
4032       (10 * frc.get_call_count() < frc.get_float_count()) ) {
4033     set_24_bit_selection_and_mode(false, true);
4034   }
4035 #endif // IA32
4036 
4037   set_java_calls(frc.get_java_call_count());
4038   set_inner_loops(frc.get_inner_loop_count());
4039 
4040   // No infinite loops, no reason to bail out.
4041   return false;
4042 }
4043 
4044 //-----------------------------too_many_traps----------------------------------
4045 // Report if there are too many traps at the current method and bci.
4046 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4047 bool Compile::too_many_traps(ciMethod* method,
4048                              int bci,
4049                              Deoptimization::DeoptReason reason) {
4050   if (method->has_trap_at(bci)) {
4051     return true;
4052   }
4053   ciMethodData* md = method->method_data();
4054   if (md->is_empty()) {
4055     // Assume the trap has not occurred, or that it occurred only
4056     // because of a transient condition during start-up in the interpreter.
4057     return false;
4058   }
4059   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4060   if (md->has_trap_at(bci, m, reason) != 0) {
4061     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4062     // Also, if there are multiple reasons, or if there is no per-BCI record,
4063     // assume the worst.
4064     if (log())
4065       log()->elem("observe trap='%s' count='%d'",
4066                   Deoptimization::trap_reason_name(reason),
4067                   md->trap_count(reason));
4068     return true;
4069   } else {
4070     // Ignore method/bci and see if there have been too many globally.
4071     return too_many_traps(reason, md);
4072   }
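
A minimal caller sketch for the query above, assuming a parsing context that must decide between speculation and an explicit check; `C`, `method()`, `bci()`, and the chosen deopt reason are placeholder names, not part of this change:

    // Hypothetical caller sketch -- identifiers are placeholders.
    // Choose between speculating with an uncommon trap and emitting an explicit runtime check.
    if (C->too_many_traps(method(), bci(), Deoptimization::Reason_class_check)) {
      // Deopts have already fired too often here (or globally for this reason):
      // emit a real runtime check instead of another uncommon trap, avoiding
      // repeated recompilation.
    } else {
      // Safe to speculate: emit an uncommon trap on the slow path.
    }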

4147   _allowed_reasons = 0;
4148   if (is_method_compilation()) {
4149     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4150       assert(rs < BitsPerInt, "recode bit map");
4151       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4152         _allowed_reasons |= nth_bit(rs);
4153       }
4154     }
4155   }
4156 }
4157 
4158 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4159   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4160 }
4161 
4162 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4163   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4164 }
4165 
4166 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4167   if (holder->is_initialized() && !do_clinit_barriers()) {
4168     return false;
4169   }
4170   if (holder->is_being_initialized() || do_clinit_barriers()) {
4171     if (accessing_method->holder() == holder) {
4172       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4173       // <init>, or a static method. In all those cases, an initialization barrier
4174       // on the holder klass has already been passed.
4175       if (accessing_method->is_static_initializer() ||
4176           accessing_method->is_object_initializer() ||
4177           accessing_method->is_static()) {
4178         return false;
4179       }
4180     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4181       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4182       // In case of <init> or a static method, a barrier on the subclass is not enough:
4183       // a child class can become fully initialized while its parent class is still being initialized.
4184       if (accessing_method->is_static_initializer()) {
4185         return false;
4186       }
4187     }
4188     ciMethod* root = method(); // the root method of compilation
4189     if (root != accessing_method) {
4190       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
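
A minimal caller sketch for the barrier-elision rules above, assuming a parser that is about to emit a static field access; `C`, `field`, `method()`, and `emit_clinit_barrier` are placeholder names, not part of this change:

    // Hypothetical caller sketch -- identifiers are placeholders.
    // 'field' is a static ciField accessed from the method currently being parsed.
    if (C->needs_clinit_barrier(field, method())) {
      // The holder may still be uninitialized when this access executes:
      // keep a class-initialization barrier in front of the load/store.
      emit_clinit_barrier(field->holder());   // hypothetical helper
    }
    // Otherwise the holder is provably initialized (or the access runs in its own
    // <clinit>/static context), so the barrier can be elided.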

4424 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4425   if (ctrl != nullptr) {
4426     // Express control dependency by a CastII node with a narrow type.
4427     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4428     // node from floating above the range check during loop optimizations. Otherwise, the
4429     // ConvI2L node may be eliminated independently of the range check, causing the data path
4430     // to become TOP while the control path is still there (although it's unreachable).
4431     value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4432     value = phase->transform(value);
4433   }
4434   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4435   return phase->transform(new ConvI2LNode(value, ltype));
4436 }
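
A minimal usage sketch for the helper above, assuming a range-checked array index is being widened to long; `gvn`, `index`, `index_type`, and `range_check_ctrl` are placeholder names, not part of this change:

    // Hypothetical usage sketch -- identifiers are placeholders.
    // 'index' is an int node whose narrowed bounds 'index_type' were proven by a range
    // check; 'range_check_ctrl' is the control projection of that check; 'gvn' is a PhaseGVN*.
    Node* long_index = Compile::constrained_convI2L(gvn, index, index_type,
                                                    range_check_ctrl,
                                                    false /* carry_dependency */);
    // The ConvI2L is fed by a CastII pinned at 'range_check_ctrl', so loop optimizations
    // cannot hoist the conversion above the check that established 'index_type'.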
4437 
4438 void Compile::dump_print_inlining() {
4439   inline_printer()->print_on(tty);
4440 }
4441 
4442 void Compile::log_late_inline(CallGenerator* cg) {
4443   if (log() != nullptr) {
4444     log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4445                 cg->unique_id());
4446     JVMState* p = cg->call_node()->jvms();
4447     while (p != nullptr) {
4448       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4449       p = p->caller();
4450     }
4451     log()->tail("late_inline");
4452   }
4453 }
4454 
4455 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4456   log_late_inline(cg);
4457   if (log() != nullptr) {
4458     log()->inline_fail(msg);
4459   }
4460 }
4461 
4462 void Compile::log_inline_id(CallGenerator* cg) {
4463   if (log() != nullptr) {
4464     // The LogCompilation tool needs a unique way to identify late