src/hotspot/share/compiler/compilationPolicy.cpp
* questions.
*
*/
#include "cds/aotLinkedClassBulkLoader.hpp"
+ #include "code/aotCodeCache.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
+ #include "compiler/recompilationPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/trainingData.hpp"
#endif
int64_t CompilationPolicy::_start_time = 0;
int CompilationPolicy::_c1_count = 0;
int CompilationPolicy::_c2_count = 0;
+ int CompilationPolicy::_c3_count = 0;
+ int CompilationPolicy::_ac_count = 0;
double CompilationPolicy::_increase_threshold_at_ratio = 0;
CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;
void compilationPolicy_init() {
if (ReplayCompiles) return false;
if (m->has_compiled_code()) return false; // already compiled
if (!can_be_compiled(m, comp_level)) return false;
- return !UseInterpreter || // must compile all methods
+ return !UseInterpreter || // must compile all methods
(AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}
void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
if (m->method_holder()->is_not_initialized()) {
if (!m->is_native() && MethodTrainingData::have_data()) {
MethodTrainingData* mtd = MethodTrainingData::find_fast(m);
if (mtd == nullptr) {
return; // there is no training data recorded for m
}
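+ // Request a recompilation if the existing code still contains class-initialization barriers.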
+ bool recompile = m->code_has_clinit_barriers();
CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
- if (next_level != cur_level && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
+ if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
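+ // The training data records whether the last top-level compilation at this level still has
+ // unresolved initialization dependencies; if so, the method has to be compiled online instead
+ // of being loaded from the AOT code cache.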
+ bool requires_online_compilation = false;
+ CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
+ if (ctd != nullptr) {
+ requires_online_compilation = (ctd->init_deps_left() > 0);
+ }
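+ // Do not force an online recompilation just to get rid of clinit barriers.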
+ if (requires_online_compilation && recompile) {
+ return;
+ }
if (PrintTieredEvents) {
print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
}
- CompileBroker::compile_method(m, InvocationEntryBci, next_level, 0, CompileTask::Reason_MustBeCompiled, THREAD);
+ CompileBroker::compile_method(m, InvocationEntryBci, next_level, 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
}
}
}
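+ // Called once the holder class has finished initialization; re-evaluates the method for early compilation.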
+ void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
+ assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
+ maybe_compile_early(m, THREAD);
+ }
+
void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
// don't force compilation, resolve was on behalf of compiler
return;
}
// This path is unusual, mostly used by the '-Xcomp' stress test mode.
CompLevel level = initial_compile_level(m);
if (PrintTieredEvents) {
print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
}
- CompileBroker::compile_method(m, InvocationEntryBci, level, 0, CompileTask::Reason_MustBeCompiled, THREAD);
+ CompileBroker::compile_method(m, InvocationEntryBci, level, 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
}
}
void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
if (!klass->has_init_deps_processed()) {
KlassTrainingData* ktd = KlassTrainingData::find(klass);
if (ktd != nullptr) {
guarantee(ktd->has_holder(), "");
ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
assert(klass->has_init_deps_processed(), "");
+
if (AOTCompileEagerly) {
ktd->iterate_comp_deps([&](CompileTrainingData* ctd) {
if (ctd->init_deps_left() == 0) {
MethodTrainingData* mtd = ctd->method();
if (mtd->has_holder()) {
}
}
}
}
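+ // Wait until the replay training thread has drained the whole queue.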
+ void CompilationPolicy::flush_replay_training_at_init(TRAPS) {
+ MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
+ while (!_training_replay_queue.is_empty_unlocked()) {
+ locker.wait(); // let the replay training thread drain the queue
+ }
+ }
+
void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
assert(klass->is_initialized(), "");
if (TrainingData::have_data() && klass->is_shared()) {
_training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
}
st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
}
}
void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
- while (!CompileBroker::is_compilation_disabled_forever()) {
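+ // With AOTVerifyTrainingData, keep draining the replay queue even after compilation has been disabled.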
+ while (!CompileBroker::is_compilation_disabled_forever() || AOTVerifyTrainingData) {
InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
if (ik != nullptr) {
replay_training_at_init_impl(ik, THREAD);
}
}
if (CompilationModeFlag::quick_internal()) {
#if INCLUDE_JVMCI
if (UseJVMCICompiler) {
AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
- return true;
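+ // When a dedicated C3 compiler is configured for the AOT code cache, do not force the simple level here.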
+ return !AOTCodeCache::is_C3_on();
}
}
#endif
}
return false;
tty->print("compile");
break;
case FORCE_COMPILE:
tty->print("force-compile");
break;
+ case FORCE_RECOMPILE:
+ tty->print("force-recompile");
+ break;
case REMOVE_FROM_QUEUE:
tty->print("remove-from-queue");
break;
case UPDATE_IN_QUEUE:
tty->print("update-in-queue");
tty->print(" rate=");
if (m->prev_time() == 0) tty->print("n/a");
else tty->print("%f", m->rate());
+ RecompilationPolicy::print_load_average();
+
tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
if (type != COMPILE) {
print_counters("", m);
tty->print_cr("]");
}
void CompilationPolicy::initialize() {
if (!CompilerConfig::is_interpreter_only()) {
+ if (AOTCodeCache::is_dumping_code()) {
+ // The assembly phase runs C1 and C2 compilation in separate phases and can use
+ // all the CPU threads it can reach. Adjust the common options before the policy
+ // starts overwriting them. A block at the very end overrides the final thread counts.
+ if (FLAG_IS_DEFAULT(UseDynamicNumberOfCompilerThreads)) {
+ FLAG_SET_ERGO(UseDynamicNumberOfCompilerThreads, false);
+ }
+ if (FLAG_IS_DEFAULT(CICompilerCount)) {
+ FLAG_SET_ERGO(CICompilerCount, MAX2(2, os::active_processor_count()));
+ }
+ }
int count = CICompilerCount;
bool c1_only = CompilerConfig::is_c1_only();
bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();
#ifdef _LP64
if (UseJVMCICompiler && UseJVMCINativeLibrary) {
int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
int c1_count = MAX2(count - libjvmci_count, 1);
set_c2_count(libjvmci_count);
set_c1_count(c1_count);
+ } else if (AOTCodeCache::is_C3_on()) {
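+ // Dedicate one C3 thread and split the remaining threads between C1 and C2 as usual.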
+ set_c1_count(MAX2(count / 3, 1));
+ set_c2_count(MAX2(count - c1_count(), 1));
+ set_c3_count(1);
} else
#endif
{
set_c1_count(MAX2(count / 3, 1));
set_c2_count(MAX2(count - c1_count(), 1));
}
}
+ if (AOTCodeCache::is_dumping_code()) {
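+ // In the assembly phase C1 and C2 run in separate phases, so give each tier the full thread budget.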
+ set_c1_count(count);
+ set_c2_count(count);
+ count *= 2; // satisfy the assert below
+ }
+ if (AOTCodeCache::is_code_load_thread_on()) {
+ set_ac_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
+ }
assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
set_increase_threshold_at_ratio();
}
+
set_start_time(nanos_to_millis(os::javaTimeNanos()));
}
#ifdef ASSERT
if (task->is_unloaded()) {
compile_queue->remove_and_mark_stale(task);
task = next_task;
continue;
}
+ if (task->is_aot()) {
+ // AOTCodeCache tasks are on a separate queue, and they should load fast. There is no need to walk
+ // the rest of the queue; just take the task and go.
+ return task;
+ }
if (task->is_blocking() && task->compile_reason() == CompileTask::Reason_Whitebox) {
// CTW tasks, submitted as blocking Whitebox requests, do not participate in rate
// selection and/or any level adjustments. Just return them in order.
return task;
}
if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
if (PrintTieredEvents) {
print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
}
method->clear_queued_for_compilation();
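+ // Also reset the pending-queue state so the method can go through queueing again.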
+ method->set_pending_queue_processed(false);
compile_queue->remove_and_mark_stale(task);
task = next_task;
continue;
}
update_rate(t, mh);
- if (max_task == nullptr || compare_methods(method, max_method)) {
+ if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
// Select a method with the highest rate
max_task = task;
max_method = method;
}
if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
max_task->set_comp_level(CompLevel_limited_profile);
- if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
+ if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
+ false /* requires_online_compilation */,
+ CompileTask::Reason_None)) {
if (PrintTieredEvents) {
print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
}
compile_queue->remove_and_mark_stale(max_task);
max_method->clear_queued_for_compilation();
if (PrintTieredEvents) {
print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
}
}
+
return max_task;
}
void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
if (PrintTieredEvents) {
print_event(COMPILE, mh(), mh(), bci, level);
}
int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
- CompileBroker::compile_method(mh, bci, level, hot_count, CompileTask::Reason_Tiered, THREAD);
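+ // If training data shows unresolved class-initialization dependencies for this compilation,
+ // it must be done online rather than loaded from the AOT code cache.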
+ bool requires_online_compilation = false;
+ if (TrainingData::have_data()) {
+ MethodTrainingData* mtd = MethodTrainingData::find_fast(mh);
+ if (mtd != nullptr) {
+ CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
+ if (ctd != nullptr) {
+ requires_online_compilation = (ctd->init_deps_left() > 0);
+ }
+ }
+ }
+ CompileBroker::compile_method(mh, bci, level, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
}
}
// update_rate() is called from select_task() while holding a compile queue lock.
void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
}
}
return false;
}
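+ // Returns true when x should displace y in select_task(): x was submitted for a different
+ // reason while y is a forced (MustBeCompiled) compilation.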
+ bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
+ assert(!x->is_aot() && !y->is_aot(), "AOT code caching tasks are not expected here");
+ if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
+ return true;
+ }
+ return false;
+ }
+
// Is method profiled enough?
bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
MethodData* mdo = method->method_data();
if (mdo != nullptr) {
int i = mdo->invocation_count_delta();
}
if (is_old(method)) {
return true;
}
-
int i = method->invocation_count();
int b = method->backedge_count();
double k = Tier0ProfilingStartPercentage / 100.0;
// If the top level compiler is not keeping up, delay profiling.