
src/hotspot/share/compiler/compilationPolicy.cpp

@@ -21,19 +21,22 @@
   * questions.
   *
   */
  
  #include "precompiled.hpp"
+ #include "cds/classPreloader.hpp"
  #include "code/scopeDesc.hpp"
+ #include "code/SCCache.hpp"
  #include "compiler/compilationPolicy.hpp"
  #include "compiler/compileBroker.hpp"
  #include "compiler/compilerDefinitions.inline.hpp"
  #include "compiler/compilerOracle.hpp"
  #include "memory/resourceArea.hpp"
  #include "oops/methodData.hpp"
  #include "oops/method.inline.hpp"
  #include "oops/oop.inline.hpp"
+ #include "oops/trainingData.hpp"
  #include "prims/jvmtiExport.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/deoptimization.hpp"
  #include "runtime/frame.hpp"
  #include "runtime/frame.inline.hpp"

@@ -49,15 +52,21 @@
  #endif
  #if INCLUDE_JVMCI
  #include "jvmci/jvmci.hpp"
  #endif
  
- jlong CompilationPolicy::_start_time = 0;
+ int64_t CompilationPolicy::_start_time = 0;
  int CompilationPolicy::_c1_count = 0;
  int CompilationPolicy::_c2_count = 0;
+ int CompilationPolicy::_c3_count = 0;
+ int CompilationPolicy::_sc_count = 0;
  double CompilationPolicy::_increase_threshold_at_ratio = 0;
  
+ CompilationPolicy::LoadAverage CompilationPolicy::_load_average;
+ CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;
+ volatile bool CompilationPolicy::_recompilation_done = false;
+ 
  void compilationPolicy_init() {
    CompilationPolicy::initialize();
  }
  
  int CompilationPolicy::compiler_count(CompLevel comp_level) {

@@ -67,47 +76,260 @@
      return c2_count();
    }
    return 0;
  }
  
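+ // Record the current C2 queue size in the load average that gates forced
+ // recompilation of cached code (see have_recompilation_work()).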
+ void CompilationPolicy::sample_load_average() {
+   const int c2_queue_size = CompileBroker::queue_size(CompLevel_full_optimization);
+   _load_average.sample(c2_queue_size);
+ }
+ 
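+ // Recompilation work is pending if the training data supplies a recompilation
+ // schedule that has not been fully processed yet and the C2 load average does
+ // not exceed RecompilationLoadAverageThreshold.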
+ bool CompilationPolicy::have_recompilation_work() {
+   if (UseRecompilation && TrainingData::have_data() && TrainingData::have_recompilation_schedule() &&
+                           TrainingData::recompilation_schedule()->length() > 0 && !_recompilation_done) {
+     if (_load_average.value() <= RecompilationLoadAverageThreshold) {
+       return true;
+     }
+   }
+   return false;
+ }
+ 
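+ // Walk the recompilation schedule and submit up to 'step' methods for online C2
+ // recompilation: entries whose holder is initialized and that are still running
+ // cached (SCC) level 4 code. Returns true if any compilation was submitted.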
+ bool CompilationPolicy::recompilation_step(int step, TRAPS) {
+   if (!have_recompilation_work() || os::elapsedTime() < DelayRecompilation) {
+     return false;
+   }
+ 
+   const int size = TrainingData::recompilation_schedule()->length();
+   int i = 0;
+   int count = 0;
+   bool repeat = false;
+   for (; i < size && count < step; i++) {
+     if (!TrainingData::recompilation_status()[i]) {
+       MethodTrainingData* mtd = TrainingData::recompilation_schedule()->at(i);
+       if (!mtd->has_holder()) {
+         Atomic::release_store(&TrainingData::recompilation_status()[i], true);
+         continue;
+       }
+       const Method* method = mtd->holder();
+       InstanceKlass* klass = method->method_holder();
+       if (klass->is_not_initialized()) {
+         repeat = true;
+         continue;
+       }
+       nmethod* nm = method->code();
+       if (nm == nullptr) {
+         repeat = true;
+         continue;
+       }
+ 
+       if (!ForceRecompilation && !(nm->is_scc() && nm->comp_level() == CompLevel_full_optimization)) {
+         // If it's already online-compiled at level 4, mark it as done.
+         if (nm->comp_level() == CompLevel_full_optimization) {
+           Atomic::store(&TrainingData::recompilation_status()[i], true);
+         } else {
+           repeat = true;
+         }
+         continue;
+       }
+       if (Atomic::cmpxchg(&TrainingData::recompilation_status()[i], false, true) == false) {
+         const methodHandle m(THREAD, const_cast<Method*>(method));
+         CompLevel next_level = CompLevel_full_optimization;
+ 
+         if (method->method_data() == nullptr) {
+           create_mdo(m, THREAD);
+         }
+ 
+         if (PrintTieredEvents) {
+           print_event(FORCE_RECOMPILE, m(), m(), InvocationEntryBci, next_level);
+         }
+         CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0,
+                                       true /*requires_online_compilation*/, CompileTask::Reason_MustBeCompiled, THREAD);
+         if (HAS_PENDING_EXCEPTION) {
+           CLEAR_PENDING_EXCEPTION;
+         }
+         count++;
+       }
+     }
+   }
+ 
+   if (i == size && !repeat) {
+     Atomic::release_store(&_recompilation_done, true);
+   }
+   return count > 0;
+ }
+ 
  // Returns true if m must be compiled before executing it
  // This is intended to force compiles for methods (usually for
  // debugging) that would otherwise be interpreted for some reason.
  bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
    // Don't allow Xcomp to cause compiles in replay mode
    if (ReplayCompiles) return false;
  
    if (m->has_compiled_code()) return false;       // already compiled
    if (!can_be_compiled(m, comp_level)) return false;
  
-   return !UseInterpreter ||                                              // must compile all methods
+   return !UseInterpreter ||                                                                        // must compile all methods
           (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
  }
  
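+ // Eagerly submit a compilation at the level suggested by the training data,
+ // provided the holder class is initialized and training data was recorded for
+ // the method.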
+ void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
+   if (m->method_holder()->is_not_initialized()) {
+     // 'is_not_initialized' means not only '!is_initialized', but also that
+     // initialization has not been started yet ('!being_initialized').
+     // Do not force compilation of methods in uninitialized classes.
+     return;
+   }
+   if (!m->is_native() && MethodTrainingData::have_data()) {
+     MethodTrainingData* mtd = MethodTrainingData::find(m);
+     if (mtd == nullptr) {
+       return;              // there is no training data recorded for m
+     }
+     bool recompile = m->code_has_clinit_barriers();
+     CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
+     CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
+     if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
+       bool requires_online_compilation = false;
+       CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
+       if (ctd != nullptr) {
+         requires_online_compilation = (ctd->init_deps_left() > 0);
+       }
+       if (requires_online_compilation && recompile) {
+         return;
+       }
+       if (PrintTieredEvents) {
+         print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
+       }
+       CompileBroker::compile_method(m, InvocationEntryBci, next_level, methodHandle(), 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
+       if (HAS_PENDING_EXCEPTION) {
+         CLEAR_PENDING_EXCEPTION;
+       }
+     }
+   }
+ }
+ 
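+ // Same as maybe_compile_early(), for use once the holder class has completed
+ // initialization.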
+ void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
+   assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
+   maybe_compile_early(m, THREAD);
+ }
+ 
  void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
+   if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
+     // don't force compilation, resolve was on behalf of compiler
+     return;
+   }
+   if (m->method_holder()->is_not_initialized()) {
+     // 'is_not_initialized' means not only '!is_initialized', but also that
+     // initialization has not been started yet ('!being_initialized').
+     // Do not force compilation of methods in uninitialized classes.
+     // Note that doing this would throw an assert later,
+     // in CompileBroker::compile_method.
+     // We sometimes use the link resolver to do reflective lookups
+     // even before classes are initialized.
+     return;
+   }
+ 
    if (must_be_compiled(m)) {
      // This path is unusual, mostly used by the '-Xcomp' stress test mode.
+     CompLevel level = initial_compile_level(m);
+     if (PrintTieredEvents) {
+       print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
+     }
+     CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
+   }
+ }
  
-     if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
-       // don't force compilation, resolve was on behalf of compiler
-       return;
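+ // Replay the recorded training for 'klass': mark its initialization dependencies
+ // as processed, then attempt early compilation of the methods that became
+ // eligible as a result.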
+ void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
+   if (!klass->has_init_deps_processed()) {
+     ResourceMark rm;
+     log_debug(training)("Replay training: %s", klass->external_name());
+ 
+     KlassTrainingData* ktd = KlassTrainingData::find(klass);
+     if (ktd != nullptr) {
+       guarantee(ktd->has_holder(), "");
+       ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
+       assert(klass->has_init_deps_processed(), "");
+ 
+       ktd->iterate_all_comp_deps([&](CompileTrainingData* ctd) {
+         if (ctd->init_deps_left() == 0) {
+           MethodTrainingData* mtd = ctd->method();
+           if (mtd->has_holder()) {
+             const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
+             CompilationPolicy::maybe_compile_early(mh, THREAD);
+           }
+         }
+       });
      }
-     if (m->method_holder()->is_not_initialized()) {
-       // 'is_not_initialized' means not only '!is_initialized', but also that
-       // initialization has not been started yet ('!being_initialized')
-       // Do not force compilation of methods in uninitialized classes.
-       // Note that doing this would throw an assert later,
-       // in CompileBroker::compile_method.
-       // We sometimes use the link resolver to do reflective lookups
-       // even before classes are initialized.
-       return;
+     Array<Method*>* methods = klass->methods();
+     for (int i = 0; i < methods->length(); i++) {
+       const methodHandle mh(THREAD, methods->at(i));
+       CompilationPolicy::maybe_compile_early_after_init(mh, THREAD);
      }
-     CompLevel level = initial_compile_level(m);
-     if (PrintTieredEvents) {
-       print_event(COMPILE, m(), m(), InvocationEntryBci, level);
+   }
+ }
+ 
+ void CompilationPolicy::replay_training_at_init(bool is_on_shutdown, TRAPS) {
+   // With a concurrent replay thread present we only wait for it to drain the
+   // queue (when verifying training data); otherwise we drain the pending queue here.
+   if (UseConcurrentTrainingReplay) {
+     if (VerifyTrainingData) {
+       MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
+       while (!_training_replay_queue.is_empty_unlocked()) {
+         locker.wait(); // let the replay training thread drain the queue
+       }
+     }
+   } else {
+     do {
+       InstanceKlass* pending = _training_replay_queue.try_pop(TrainingReplayQueue_lock, THREAD);
+       if (pending == nullptr) {
+         break; // drained the queue
+       }
+       if (is_on_shutdown) {
+         LogStreamHandle(Warning, training) log;
+         if (log.is_enabled()) {
+           ResourceMark rm;
+           log.print("pending training replay request: %s%s",
+                     pending->external_name(), (pending->has_preinitialized_mirror() ? " (preinitialized)" : ""));
+         }
+       }
+       replay_training_at_init_impl(pending, THREAD);
+     } while (true);
+   }
+ 
+   if (VerifyTrainingData) {
+     TrainingData::verify();
+   }
+ }
+ 
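+ // Called when 'klass' becomes initialized: hand it to the concurrent replay
+ // thread or replay its training data inline.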
+ void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
+   assert(klass->is_initialized(), "");
+   if (TrainingData::have_data() && klass->is_shared() &&
+       (CompileBroker::replay_initialized() || !klass->has_preinitialized_mirror())) { // ignore preloaded classes during early startup
+     if (UseConcurrentTrainingReplay || !CompileBroker::replay_initialized()) {
+       _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
+     } else {
+       replay_training_at_init_impl(klass, THREAD);
      }
-     CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD);
+     assert(!HAS_PENDING_EXCEPTION, "");
+   }
+ }
+ 
+ // For TrainingReplayQueue
+ template<>
+ void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
+   int pos = 0;
+   for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
+     ResourceMark rm;
+     InstanceKlass* ik = cur->value();
+     st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
+   }
+ }
+ 
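+ // Loop executed by the dedicated training replay thread: block on the queue and
+ // replay training data for each class that gets pushed.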
+ void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
+   precond(UseConcurrentTrainingReplay);
+ 
+   while (!CompileBroker::is_compilation_disabled_forever() || VerifyTrainingData) {
+     InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
+     replay_training_at_init_impl(ik, THREAD);
    }
  }
  
  static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
    if (comp_level == CompLevel_any) {

@@ -121,11 +343,11 @@
  }
  
  // Returns true if m is allowed to be compiled
  bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
    // allow any levels for WhiteBox
-   assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level");
+   assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);
  
    if (m->is_abstract()) return false;
    if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
  
    // Math intrinsics should never be compiled as this can lead to

@@ -201,11 +423,11 @@
    if (CompilationModeFlag::quick_internal()) {
  #if INCLUDE_JVMCI
      if (UseJVMCICompiler) {
        AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
        if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
-         return true;
+         return !SCCache::is_C3_on();
        }
      }
  #endif
    }
    return false;

@@ -321,11 +543,11 @@
      return k;
    }
    return 1;
  }
  
- void CompilationPolicy::print_counters(const char* prefix, const Method* m) {
+ void CompilationPolicy::print_counters(const char* prefix, Method* m) {
    int invocation_count = m->invocation_count();
    int backedge_count = m->backedge_count();
    MethodData* mdh = m->method_data();
    int mdo_invocations = 0, mdo_backedges = 0;
    int mdo_invocations_start = 0, mdo_backedges_start = 0;

@@ -341,12 +563,40 @@
        mdo_backedges, mdo_backedges_start);
    tty->print(" %smax levels=%d,%d", prefix,
        m->highest_comp_level(), m->highest_osr_comp_level());
  }
  
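+ // Print the training-data view of a method (stored profile counters and
+ // remaining init dependencies) as part of a tiered event.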
+ void CompilationPolicy::print_training_data(const char* prefix, Method* method) {
+   methodHandle m(Thread::current(), method);
+   tty->print(" %smtd: ", prefix);
+   MethodTrainingData* mtd = MethodTrainingData::find(m);
+   if (mtd == nullptr) {
+     tty->print("null");
+   } else {
+     MethodData* md = mtd->final_profile();
+     tty->print("mdo=");
+     if (md == nullptr) {
+       tty->print("null");
+     } else {
+       int mdo_invocations = md->invocation_count();
+       int mdo_backedges = md->backedge_count();
+       int mdo_invocations_start = md->invocation_count_start();
+       int mdo_backedges_start = md->backedge_count_start();
+       tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
+     }
+     CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
+     tty->print(", deps=");
+     if (ctd == nullptr) {
+       tty->print("null");
+     } else {
+       tty->print("%d", ctd->init_deps_left());
+     }
+   }
+ }
+ 
  // Print an event.
- void CompilationPolicy::print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level) {
+ void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
    bool inlinee_event = m != im;
  
    ttyLocker tty_lock;
    tty->print("%lf: [", os::elapsedTime());
  

@@ -358,10 +608,16 @@
      tty->print("loop");
      break;
    case COMPILE:
      tty->print("compile");
      break;
+   case FORCE_COMPILE:
+     tty->print("force-compile");
+     break;
+   case FORCE_RECOMPILE:
+     tty->print("force-recompile");
+     break;
    case REMOVE_FROM_QUEUE:
      tty->print("remove-from-queue");
      break;
    case UPDATE_IN_QUEUE:
      tty->print("update-in-queue");

@@ -390,10 +646,11 @@
                                        CompileBroker::queue_size(CompLevel_full_optimization));
  
    tty->print(" rate=");
    if (m->prev_time() == 0) tty->print("n/a");
    else tty->print("%f", m->rate());
+   tty->print(" load=%lf", _load_average.value());
  
    tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                                 threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
  
    if (type != COMPILE) {

@@ -423,10 +680,14 @@
      }
      tty->print(" status=");
      if (m->queued_for_compilation()) {
        tty->print("in-queue");
      } else tty->print("idle");
+     print_training_data("", m);
+     if (inlinee_event) {
+       print_training_data("inlinee ", im);
+     }
    }
    tty->print_cr("]");
  }
  
  void CompilationPolicy::initialize() {

@@ -486,24 +747,34 @@
        if (UseJVMCICompiler && UseJVMCINativeLibrary) {
          int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
          int c1_count = MAX2(count - libjvmci_count, 1);
          set_c2_count(libjvmci_count);
          set_c1_count(c1_count);
+       } else if (SCCache::is_C3_on()) {
+         set_c1_count(MAX2(count / 3, 1));
+         set_c2_count(MAX2(count - c1_count(), 1));
+         set_c3_count(1);
        } else
  #endif
        {
          set_c1_count(MAX2(count / 3, 1));
          set_c2_count(MAX2(count - c1_count(), 1));
        }
+       if (SCCache::is_code_load_thread_on()) {
+         set_sc_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
+       }
      }
      assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
      set_increase_threshold_at_ratio();
    }
+ 
    set_start_time(nanos_to_millis(os::javaTimeNanos()));
  }
  
  
  #ifdef ASSERT
  bool CompilationPolicy::verify_level(CompLevel level) {
    if (TieredCompilation && level > TieredStopAtLevel) {
      return false;
    }

@@ -616,16 +887,16 @@
      mdo->backedge_counter()->set_carry_on_overflow();
    }
  }
  
  // Called with the queue locked and with at least one element
- CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) {
+ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
    CompileTask *max_blocking_task = nullptr;
    CompileTask *max_task = nullptr;
    Method* max_method = nullptr;
  
-   jlong t = nanos_to_millis(os::javaTimeNanos());
+   int64_t t = nanos_to_millis(os::javaTimeNanos());
    // Iterate through the queue and find a method with a maximum rate.
    for (CompileTask* task = compile_queue->first(); task != nullptr;) {
      CompileTask* next_task = task->next();
      // If a method was unloaded or has been stale for some time, remove it from the queue.
      // Blocking tasks and tasks submitted from whitebox API don't become stale

@@ -633,22 +904,23 @@
        compile_queue->remove_and_mark_stale(task);
        task = next_task;
        continue;
      }
      Method* method = task->method();
-     methodHandle mh(Thread::current(), method);
+     methodHandle mh(THREAD, method);
      if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
        }
        method->clear_queued_for_compilation();
+       method->set_pending_queue_processed(false);
        compile_queue->remove_and_mark_stale(task);
        task = next_task;
        continue;
      }
      update_rate(t, mh);
-     if (max_task == nullptr || compare_methods(method, max_method)) {
+     if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
        // Select the method with the highest rate, or a task preferred by compare_tasks()
        max_task = task;
        max_method = method;
      }
  

@@ -669,17 +941,19 @@
      // chance of such compilations timing out.
      max_task = max_blocking_task;
      max_method = max_task->method();
    }
  
-   methodHandle max_method_h(Thread::current(), max_method);
+   methodHandle max_method_h(THREAD, max_method);
  
    if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
        max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
      max_task->set_comp_level(CompLevel_limited_profile);
  
-     if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
+     if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
+                                                false /* requires_online_compilation */,
+                                                CompileTask::Reason_None)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
        }
        compile_queue->remove_and_mark_stale(max_task);
        max_method->clear_queued_for_compilation();

@@ -688,11 +962,10 @@
  
      if (PrintTieredEvents) {
        print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
      }
    }
- 
    return max_task;
  }
  
  void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
    for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {

@@ -711,10 +984,17 @@
                                        int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
    if (PrintTieredEvents) {
      print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
    }
  
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI && UseJVMCICompiler &&
+       comp_level == CompLevel_full_optimization && !ClassPreloader::class_preloading_finished()) {
+     return nullptr;
+   }
+ #endif
+ 
    if (comp_level == CompLevel_none &&
        JvmtiExport::can_post_interpreter_events() &&
        THREAD->is_interp_only_mode()) {
      return nullptr;
    }

@@ -806,16 +1086,26 @@
      if (PrintTieredEvents) {
        print_event(COMPILE, mh(), mh(), bci, level);
      }
      int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
      update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
-     CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, THREAD);
+     bool requires_online_compilation = false;
+     if (TrainingData::have_data()) {
+       MethodTrainingData* mtd = MethodTrainingData::find(mh);
+       if (mtd != nullptr) {
+         CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
+         if (ctd != nullptr) {
+           requires_online_compilation = (ctd->init_deps_left() > 0);
+         }
+       }
+     }
+     CompileBroker::compile_method(mh, bci, level, mh, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
    }
  }
  
  // update_rate() is called from select_task() while holding a compile queue lock.
- void CompilationPolicy::update_rate(jlong t, const methodHandle& method) {
+ void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
    // Skip update if counters are absent.
    // Can't allocate them since we are holding compile queue lock.
    if (method->method_counters() == nullptr)  return;
  
    if (is_old(method)) {

@@ -825,12 +1115,12 @@
      return;
    }
  
    // We don't update the rate if we've just come out of a safepoint.
    // delta_s is the time since last safepoint in milliseconds.
-   jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
-   jlong delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
+   int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
+   int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
    // How many events were there since the last time?
    int event_count = method->invocation_count() + method->backedge_count();
    int delta_e = event_count - method->prev_event_count();
  
    // We should be running for at least 1ms.

@@ -849,13 +1139,13 @@
    }
  }
  
  // Check if this method has been stale for a given number of milliseconds.
  // See select_task().
- bool CompilationPolicy::is_stale(jlong t, jlong timeout, const methodHandle& method) {
-   jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
-   jlong delta_t = t - method->prev_time();
+ bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
+   int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
+   int64_t delta_t = t - method->prev_time();
    if (delta_t > timeout && delta_s > timeout) {
      int event_count = method->invocation_count() + method->backedge_count();
      int delta_e = event_count - method->prev_event_count();
      // Return true if there were no events.
      return delta_e == 0;

@@ -889,10 +1179,21 @@
        }
      }
    return false;
  }
  
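+ // Tie-breaker between compile queue tasks: prefer a task that has cached (SCC)
+ // code, and prefer other compile reasons over Reason_MustBeCompiled.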
+ bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
+   if (x->is_scc() && !y->is_scc()) {
+     // x has cached code
+     return true;
+   }
+   if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
+     return true;
+   }
+   return false;
+ }
+ 
  // Is method profiled enough?
  bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
    MethodData* mdo = method->method_data();
    if (mdo != nullptr) {
      int i = mdo->invocation_count_delta();

@@ -902,17 +1203,16 @@
    return false;
  }
  
  
  // Determine if a method is mature.
- bool CompilationPolicy::is_mature(Method* method) {
+ bool CompilationPolicy::is_mature(MethodData* mdo) {
    if (Arguments::is_compiler_only()) {
      // Always report profiles as immature with -Xcomp
      return false;
    }
-   methodHandle mh(Thread::current(), method);
-   MethodData* mdo = method->method_data();
+   methodHandle mh(Thread::current(), mdo->method());
    if (mdo != nullptr) {
      int i = mdo->invocation_count();
      int b = mdo->backedge_count();
      double k = ProfileMaturityPercentage / 100.0;
      return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);

@@ -925,13 +1225,23 @@
  // We also take the load on compilers into the account.
  bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
    if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
      return false;
    }
+ 
+   if (TrainingData::have_data()) {
+     MethodTrainingData* mtd = MethodTrainingData::find(method);
+     if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
+       return true;
+     }
+     return false;
+   }
+ 
    if (is_old(method)) {
      return true;
    }
+ 
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
  
    // If the top level compiler is not keeping up, delay profiling.

@@ -961,11 +1271,11 @@
      return;
    }
    if (mh->method_data() == nullptr) {
      Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
    }
-   if (ProfileInterpreter) {
+   if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
      MethodData* mdo = mh->method_data();
      if (mdo != nullptr) {
        frame last_frame = THREAD->last_frame();
        if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
          int bci = last_frame.interpreter_frame_bci();

@@ -974,11 +1284,140 @@
        }
      }
    }
  }
  
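+ // Pick the next level for a currently interpreted method, based on what the
+ // training run recorded for it.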
+ CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+   precond(mtd != nullptr);
+   precond(cur_level == CompLevel_none);
+ 
+   if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
+     return CompLevel_none;
+   }
+ 
+   bool training_has_profile = (mtd->final_profile() != nullptr);
+   if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
+     return CompLevel_full_profile;
+   }
+ 
+   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
+   switch (highest_training_level) {
+     case CompLevel_limited_profile:
+     case CompLevel_full_profile:
+       return CompLevel_limited_profile;
+     case CompLevel_simple:
+       return CompLevel_simple;
+     case CompLevel_none:
+       return CompLevel_none;
+     default:
+       break;
+   }
+ 
+   // Now handle the case of level 4.
+   assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
+   if (!training_has_profile) {
+     // The method was part of a level 4 compile, but we don't have a stored
+     // profile, so we need to profile it.
+     return CompLevel_full_profile;
+   }
+   const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
+   // If we deopted, then we reprofile
+   if (deopt && !is_method_profiled(method)) {
+     return CompLevel_full_profile;
+   }
+ 
+   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
+   assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
+   // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
+   if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
+     if (method->method_data() == nullptr) {
+       create_mdo(method, THREAD);
+     }
+     return CompLevel_full_optimization;
+   }
+ 
+   // Otherwise go to level 2
+   return CompLevel_limited_profile;
+ }
+ 
+ 
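+ // Pick the next level for a method at level 2; typically we get here while
+ // waiting for the stored C2 code to become ready.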
+ CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+   precond(mtd != nullptr);
+   precond(cur_level == CompLevel_limited_profile);
+ 
+   // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.
+ 
+   // But first, check if we have a saved profile
+   bool training_has_profile = (mtd->final_profile() != nullptr);
+   if (!training_has_profile) {
+     return CompLevel_full_profile;
+   }
+ 
+   assert(training_has_profile, "Have to have a profile to be here");
+   // Check if the method is ready
+   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
+   if (ctd != nullptr && ctd->init_deps_left() == 0) {
+     if (method->method_data() == nullptr) {
+       create_mdo(method, THREAD);
+     }
+     return CompLevel_full_optimization;
+   }
+ 
+   // Otherwise stay at the current level
+   return CompLevel_limited_profile;
+ }
+ 
+ 
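+ // Pick the next level for a method at level 3: go to level 4 once it is
+ // adequately profiled, if the training run saw it compiled at level 4
+ // (and not merely inlined).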
+ CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+   precond(mtd != nullptr);
+   precond(cur_level == CompLevel_full_profile);
+ 
+   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
+   // We have a method at the full profile level and we also know that it's possibly an important method.
+   if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
+     // Check if it is adequately profiled
+     if (is_method_profiled(method)) {
+       return CompLevel_full_optimization;
+     }
+   }
+ 
+   // Otherwise stay at the current level
+   return CompLevel_full_profile;
+ }
+ 
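+ // Dispatch the trained transition on the current level, then fix the result up
+ // for the C2-only compilation modes.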
+ CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+   precond(MethodTrainingData::have_data());
+ 
+   // If there is no training data recorded for this method, bail out.
+   if (mtd == nullptr) {
+     return cur_level;
+   }
+ 
+   CompLevel next_level = cur_level;
+   switch(cur_level) {
+     default: break;
+     case CompLevel_none:
+       next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
+       break;
+     case CompLevel_limited_profile:
+       next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
+       break;
+     case CompLevel_full_profile:
+       next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
+       break;
+   }
  
+   // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
+   if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
+     return CompLevel_none;
+   }
+   if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
+     return CompLevel_none;
+   }
+   return next_level;
+ }
  
  /*
   * Method states:
   *   0 - interpreter (CompLevel_none)
   *   1 - pure C1 (CompLevel_simple)

@@ -1016,97 +1455,144 @@
   *
   */
  
  // Common transition function. Given a predicate determines if a method should transition to another level.
  template<typename Predicate>
- CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
+ CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
    CompLevel next_level = cur_level;
    int i = method->invocation_count();
    int b = method->backedge_count();
  
    if (force_comp_at_level_simple(method)) {
      next_level = CompLevel_simple;
    } else {
-     if (is_trivial(method) || method->is_native()) {
-       next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
-     } else {
-       switch(cur_level) {
-       default: break;
-       case CompLevel_none:
-         // If we were at full profile level, would we switch to full opt?
-         if (common<Predicate>(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
-           next_level = CompLevel_full_optimization;
-         } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply(method, cur_level, i, b)) {
-           // C1-generated fully profiled code is about 30% slower than the limited profile
-           // code that has only invocation and backedge counters. The observation is that
-           // if C2 queue is large enough we can spend too much time in the fully profiled code
-           // while waiting for C2 to pick the method from the queue. To alleviate this problem
-           // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
-           // we choose to compile a limited profiled version and then recompile with full profiling
-           // when the load on C2 goes down.
-           if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
-               Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
-             next_level = CompLevel_limited_profile;
-           } else {
-             next_level = CompLevel_full_profile;
-           }
-         }
-         break;
-       case CompLevel_limited_profile:
-         if (is_method_profiled(method)) {
-           // Special case: we got here because this method was fully profiled in the interpreter.
-           next_level = CompLevel_full_optimization;
-         } else {
-           MethodData* mdo = method->method_data();
-           if (mdo != nullptr) {
-             if (mdo->would_profile()) {
-               if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
-                                        Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
-                                        Predicate::apply(method, cur_level, i, b))) {
-                 next_level = CompLevel_full_profile;
-               }
-             } else {
-               next_level = CompLevel_full_optimization;
-             }
-           } else {
-             // If there is no MDO we need to profile
-             if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
-                                      Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
-                                      Predicate::apply(method, cur_level, i, b))) {
-               next_level = CompLevel_full_profile;
-             }
-           }
-         }
-         break;
-       case CompLevel_full_profile:
-         {
-           MethodData* mdo = method->method_data();
-           if (mdo != nullptr) {
-             if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
-               int mdo_i = mdo->invocation_count_delta();
-               int mdo_b = mdo->backedge_count_delta();
-               if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
-                 next_level = CompLevel_full_optimization;
-               }
-             } else {
-               next_level = CompLevel_full_optimization;
-             }
-           }
+     if (MethodTrainingData::have_data()) {
+       MethodTrainingData* mtd = MethodTrainingData::find(method);
+       if (mtd == nullptr) {
+         // We haven't seen compilations of this method during training. It's either very
+         // cold or the behavior has changed. Feed it to the standard transition function
+         // with no profiling delay.
+         next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
+       } else {
+         next_level = trained_transition(method, cur_level, mtd, THREAD);
+         if (cur_level == next_level) {
+           // trained_transition() returns the same level if no startup/warmup optimizations
+           // apply. In order to catch possible pathologies due to a behavior change we feed
+           // the event to the regular transition function, but with a profiling delay.
+           next_level = standard_transition<Predicate>(method, cur_level, true /*delay_profiling*/, disable_feedback);
          }
-         break;
        }
+     } else if (is_trivial(method) || method->is_native()) {
+       next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
+     } else {
+       next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
      }
    }
    return (next_level != cur_level) ? limit_level(next_level) : next_level;
  }
  
  
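+ // The standard (non-trained) transition function, dispatching on the current
+ // level; factored out of common().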
+ template<typename Predicate>
+ CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
+   CompLevel next_level = cur_level;
+   switch(cur_level) {
+   default: break;
+   case CompLevel_none:
+     next_level = transition_from_none<Predicate>(method, cur_level, delay_profiling, disable_feedback);
+     break;
+   case CompLevel_limited_profile:
+     next_level = transition_from_limited_profile<Predicate>(method, cur_level, delay_profiling, disable_feedback);
+     break;
+   case CompLevel_full_profile:
+     next_level = transition_from_full_profile<Predicate>(method, cur_level);
+     break;
+   }
+   return next_level;
+ }
+ 
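+ // Transition logic for an interpreted method. With delay_profiling the tier 0
+ // thresholds are scaled by Tier0ProfileDelayFactor and level 2 is preferred over
+ // level 3.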
+ template<typename Predicate>
+ CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
+   precond(cur_level == CompLevel_none);
+   CompLevel next_level = cur_level;
+   int i = method->invocation_count();
+   int b = method->backedge_count();
+   double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0;
+   // If we were at full profile level, would we switch to full opt?
+   if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
+     next_level = CompLevel_full_optimization;
+   } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) {
+     // C1-generated fully profiled code is about 30% slower than the limited profile
+     // code that has only invocation and backedge counters. The observation is that
+     // if C2 queue is large enough we can spend too much time in the fully profiled code
+     // while waiting for C2 to pick the method from the queue. To alleviate this problem
+     // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
+     // we choose to compile a limited profiled version and then recompile with full profiling
+     // when the load on C2 goes down.
+     if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) {
+       next_level = CompLevel_limited_profile;
+     } else {
+       next_level = CompLevel_full_profile;
+     }
+   }
+   return next_level;
+ }
+ 
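+ // Transition logic for a method at level 3: go to level 4 once the MDO count
+ // deltas satisfy the predicate, or right away if no further profiling is useful.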
+ template<typename Predicate>
+ CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
+   precond(cur_level == CompLevel_full_profile);
+   CompLevel next_level = cur_level;
+   MethodData* mdo = method->method_data();
+   if (mdo != nullptr) {
+     if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
+       int mdo_i = mdo->invocation_count_delta();
+       int mdo_b = mdo->backedge_count_delta();
+       if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
+         next_level = CompLevel_full_optimization;
+       }
+     } else {
+       next_level = CompLevel_full_optimization;
+     }
+   }
+   return next_level;
+ }
+ 
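+ // Transition logic for a method at level 2. With delay_profiling the tier 2
+ // thresholds are scaled by Tier2ProfileDelayFactor; a method that is already
+ // fully profiled goes straight to level 4.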
+ template<typename Predicate>
+ CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
+   precond(cur_level == CompLevel_limited_profile);
+   CompLevel next_level = cur_level;
+   int i = method->invocation_count();
+   int b = method->backedge_count();
+   double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0;
+   MethodData* mdo = method->method_data();
+   if (mdo != nullptr) {
+     if (mdo->would_profile()) {
+       if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+                               Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+                               Predicate::apply_scaled(method, cur_level, i, b, scale))) {
+         next_level = CompLevel_full_profile;
+       }
+     } else {
+       next_level = CompLevel_full_optimization;
+     }
+   } else {
+     // If there is no MDO we need to profile
+     if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+                             Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+                             Predicate::apply_scaled(method, cur_level, i, b, scale))) {
+       next_level = CompLevel_full_profile;
+     }
+   }
+   if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
+     next_level = CompLevel_full_optimization;
+   }
+   return next_level;
+ }
+ 
  
  // Determine if a method should be compiled with a normal entry point at a different level.
- CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
-   CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, true));
-   CompLevel next_level = common<CallPredicate>(method, cur_level, is_old(method));
+ CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
+   CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
+   CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));
  
    // If OSR method level is greater than the regular method level, the levels should be
    // equalized by raising the regular method level in order to avoid OSRs during each
    // invocation of the method.
    if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {

@@ -1116,16 +1602,22 @@
        next_level = CompLevel_full_optimization;
      }
    } else {
      next_level = MAX2(osr_level, next_level);
    }
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI && UseJVMCICompiler &&
+       next_level == CompLevel_full_optimization && !ClassPreloader::class_preloading_finished()) {
+     next_level = cur_level;
+   }
+ #endif
    return next_level;
  }
  
  // Determine if we should do an OSR compilation of a given method.
- CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
-   CompLevel next_level = common<LoopPredicate>(method, cur_level, true);
+ CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
+   CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
    if (cur_level == CompLevel_none) {
      // If there is a live OSR method that means that we deopted to the interpreter
      // for the transition.
      CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
      if (osr_level > CompLevel_none) {
< prev index next >