/*
 * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/aotLinkedClassBulkLoader.hpp"
#include "code/scopeDesc.hpp"
#include "code/SCCache.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/trainingData.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

int64_t CompilationPolicy::_start_time = 0;
int CompilationPolicy::_c1_count = 0;
int CompilationPolicy::_c2_count = 0;
int CompilationPolicy::_c3_count = 0;
int CompilationPolicy::_sc_count = 0;
double CompilationPolicy::_increase_threshold_at_ratio = 0;

CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;

void compilationPolicy_init() {
  CompilationPolicy::initialize();
}

int CompilationPolicy::compiler_count(CompLevel comp_level) {
  if (is_c1_compile(comp_level)) {
    return c1_count();
  } else if (is_c2_compile(comp_level)) {
    return c2_count();
  }
  return 0;
}

// Returns true if m must be compiled before executing it
// This is intended to force compiles for methods (usually for
// debugging) that would otherwise be interpreted for some reason.
bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  // Don't allow Xcomp to cause compiles in replay mode
  if (ReplayCompiles) return false;

  if (m->has_compiled_code()) return false;       // already compiled
  if (!can_be_compiled(m, comp_level)) return false;

  return !UseInterpreter ||                                                                        // must compile all methods
         (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}

void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized').
    // Do not force compilation of methods in uninitialized classes.
    return;
  }
  if (!m->is_native() && MethodTrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find(m);
    if (mtd == nullptr) {
      return;              // there is no training data recorded for m
    }
    bool recompile = m->code_has_clinit_barriers();
    CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
    CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
    if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
      bool requires_online_compilation = false;
      CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
      if (ctd != nullptr) {
        requires_online_compilation = (ctd->init_deps_left() > 0);
      }
      if (requires_online_compilation && recompile) {
        return;
      }
      if (PrintTieredEvents) {
        print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
      }
      CompileBroker::compile_method(m, InvocationEntryBci, next_level, methodHandle(), 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
      }
    }
  }
}

void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
  assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
  maybe_compile_early(m, THREAD);
}

void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
  if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
    // don't force compilation, resolve was on behalf of compiler
    return;
  }
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized').
    // Do not force compilation of methods in uninitialized classes;
    // forcing a compile here would trip an assert later,
    // in CompileBroker::compile_method.
    // We sometimes use the link resolver to do reflective lookups
    // even before classes are initialized.
    return;
  }

  if (must_be_compiled(m)) {
    // This path is unusual, mostly used by the '-Xcomp' stress test mode.
    CompLevel level = initial_compile_level(m);
    if (PrintTieredEvents) {
      print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
    }
    CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
  }
}

void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
  if (!klass->has_init_deps_processed()) {
    ResourceMark rm;
    log_debug(training)("Replay training: %s", klass->external_name());

    KlassTrainingData* ktd = KlassTrainingData::find(klass);
    if (ktd != nullptr) {
      guarantee(ktd->has_holder(), "");
      ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
      assert(klass->has_init_deps_processed(), "");

      ktd->iterate_all_comp_deps([&](CompileTrainingData* ctd) {
        if (ctd->init_deps_left() == 0) {
          MethodTrainingData* mtd = ctd->method();
          if (mtd->has_holder()) {
            const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
            CompilationPolicy::maybe_compile_early(mh, THREAD);
          }
        }
      });
    }
    Array<Method*>* methods = klass->methods();
    for (int i = 0; i < methods->length(); i++) {
      const methodHandle mh(THREAD, methods->at(i));
      CompilationPolicy::maybe_compile_early_after_init(mh, THREAD);
    }
  }
}

void CompilationPolicy::replay_training_at_init(bool is_on_shutdown, TRAPS) {
  // Drain pending queue when no concurrent processing thread is present.
  if (UseConcurrentTrainingReplay) {
    if (VerifyTrainingData) {
      MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
      while (!_training_replay_queue.is_empty_unlocked()) {
        locker.wait(); // let the replay training thread drain the queue
      }
    }
  } else {
    do {
      InstanceKlass* pending = _training_replay_queue.try_pop(TrainingReplayQueue_lock, THREAD);
      if (pending == nullptr) {
        break; // drained the queue
      }
      if (is_on_shutdown) {
        LogStreamHandle(Warning, training) log;
        if (log.is_enabled()) {
          ResourceMark rm;
          log.print("pending training replay request: %s%s",
                    pending->external_name(), (pending->has_aot_initialized_mirror() ? " (preinitialized)" : ""));
        }
      }
      replay_training_at_init_impl(pending, THREAD);
    } while (true);
  }

  if (VerifyTrainingData) {
    TrainingData::verify();
  }
}

void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
  assert(klass->is_initialized(), "");
  if (TrainingData::have_data() && klass->is_shared() &&
      (CompileBroker::replay_initialized() || !klass->has_aot_initialized_mirror())) { // ignore preloaded classes during early startup
    if (UseConcurrentTrainingReplay || !CompileBroker::replay_initialized()) {
      _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
    } else {
      replay_training_at_init_impl(klass, THREAD);
    }
    assert(!HAS_PENDING_EXCEPTION, "");
  }
}

// For TrainingReplayQueue
template<>
void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
  int pos = 0;
  for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
    ResourceMark rm;
    InstanceKlass* ik = cur->value();
    st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
  }
}

void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
  precond(UseConcurrentTrainingReplay);

  while (!CompileBroker::is_compilation_disabled_forever() || VerifyTrainingData) {
    InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
    replay_training_at_init_impl(ik, THREAD);
  }
}

static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
  if (comp_level == CompLevel_any) {
    if (CompilerConfig::is_c1_only()) {
      comp_level = CompLevel_simple;
    } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      comp_level = CompLevel_full_optimization;
    }
  }
  return comp_level;
}

// Returns true if m is allowed to be compiled
bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
  // allow any levels for WhiteBox
  assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);

  if (m->is_abstract()) return false;
  if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;

  // Math intrinsics should never be compiled, as this can lead to
  // monotonicity problems because the interpreter will prefer the
  // compiled code to the intrinsic version.  This can't happen in
  // production because the invocation counter can't be incremented,
  // but we shouldn't expose the system to this problem in testing
  // modes.
  if (!AbstractInterpreter::can_be_compiled(m)) {
    return false;
  }
  comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
  if (comp_level == CompLevel_any || is_compile(comp_level)) {
    return !m->is_not_compilable(comp_level);
  }
  return false;
}

// Returns true if m is allowed to be osr compiled
bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
  bool result = false;
  comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
  if (comp_level == CompLevel_any || is_compile(comp_level)) {
    result = !m->is_not_osr_compilable(comp_level);
  }
  return (result && can_be_compiled(m, comp_level));
}

bool CompilationPolicy::is_compilation_enabled() {
  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  return CompileBroker::should_compile_new_jobs();
}

CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
  // Remove unloaded methods from the queue
  for (CompileTask* task = compile_queue->first(); task != nullptr; ) {
    CompileTask* next = task->next();
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
    }
    task = next;
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler && !BackgroundCompilation) {
    /*
     * In blocking compilation mode, the CompileBroker will make
     * compilations submitted by a JVMCI compiler thread non-blocking. These
     * compilations should be scheduled after all blocking compilations
     * to service non-compiler related compilations sooner and reduce the
     * chance of such compilations timing out.
     */
    for (CompileTask* task = compile_queue->first(); task != nullptr; task = task->next()) {
      if (task->is_blocking()) {
        return task;
      }
    }
  }
#endif
  return compile_queue->first();
}

// Simple methods do just as well compiled with C1 as with C2.
// Determine if a given method is such a case.
bool CompilationPolicy::is_trivial(const methodHandle& method) {
  if (method->is_accessor() ||
      method->is_constant_getter()) {
    return true;
  }
  return false;
}

bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
  if (CompilationModeFlag::quick_internal()) {
#if INCLUDE_JVMCI
    if (UseJVMCICompiler) {
      AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
      if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
        return !SCCache::is_C3_on();
      }
    }
#endif
  }
  return false;
}

CompLevel CompilationPolicy::comp_level(Method* method) {
  nmethod *nm = method->code();
  if (nm != nullptr && nm->is_in_use()) {
    return (CompLevel)nm->comp_level();
  }
  return CompLevel_none;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how
// many methods per compiler thread can be in the queue before
// the threshold values double.
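// For example, with Tier3LoadFeedback at its default of 5 and two C1 compiler
// threads, the tier-3 thresholds are doubled once roughly ten methods are
// queued: see threshold_scale(), where k = queue_size / (feedback * count) + 1.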
class LoopPredicate : AllStatic {
public:
  static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
    double threshold_scaling;
    if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
      scale *= threshold_scaling;
    }
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile:
      return b >= Tier3BackEdgeThreshold * scale;
    case CompLevel_full_profile:
      return b >= Tier4BackEdgeThreshold * scale;
    default:
      return true;
    }
  }

  static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
    double k = 1;
    switch(cur_level) {
    case CompLevel_none:
    // Fall through
    case CompLevel_limited_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
      break;
    }
    case CompLevel_full_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
    default:
      return true;
    }
    return apply_scaled(method, cur_level, i, b, k);
  }
};

class CallPredicate : AllStatic {
public:
  static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
    double threshold_scaling;
    if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
      scale *= threshold_scaling;
    }
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile:
      return (i >= Tier3InvocationThreshold * scale) ||
             (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
    case CompLevel_full_profile:
      return (i >= Tier4InvocationThreshold * scale) ||
             (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
    default:
      return true;
    }
  }

  static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
    double k = 1;
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
      break;
    }
    case CompLevel_full_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
    default:
      return true;
    }
    return apply_scaled(method, cur_level, i, b, k);
  }
};

double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
  int comp_count = compiler_count(level);
  if (comp_count > 0) {
    double queue_size = CompileBroker::queue_size(level);
    double k = (double)queue_size / ((double)feedback_k * (double)comp_count) + 1;
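    // E.g., with 10 tasks queued, feedback_k == 5 and comp_count == 2:
    // k = 10 / (5 * 2) + 1 = 2, i.e. the effective thresholds double.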

    // Increase C1 compile threshold when the code cache is filled more
    // than specified by IncreaseFirstTierCompileThresholdAt percentage.
    // The main intention is to keep enough free space for C2 compiled code
    // to achieve peak performance if the code cache is under stress.
    if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level)) {
      double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
      if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
        k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
      }
    }
    return k;
  }
  return 1;
}

void CompilationPolicy::print_counters(const char* prefix, Method* m) {
  int invocation_count = m->invocation_count();
  int backedge_count = m->backedge_count();
  MethodData* mdh = m->method_data();
  int mdo_invocations = 0, mdo_backedges = 0;
  int mdo_invocations_start = 0, mdo_backedges_start = 0;
  if (mdh != nullptr) {
    mdo_invocations = mdh->invocation_count();
    mdo_backedges = mdh->backedge_count();
    mdo_invocations_start = mdh->invocation_count_start();
    mdo_backedges_start = mdh->backedge_count_start();
  }
  tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
      invocation_count, backedge_count, prefix,
      mdo_invocations, mdo_invocations_start,
      mdo_backedges, mdo_backedges_start);
  tty->print(" %smax levels=%d,%d", prefix,
      m->highest_comp_level(), m->highest_osr_comp_level());
}

void CompilationPolicy::print_training_data(const char* prefix, Method* method) {
  methodHandle m(Thread::current(), method);
  tty->print(" %smtd: ", prefix);
  MethodTrainingData* mtd = MethodTrainingData::find(m);
  if (mtd == nullptr) {
    tty->print("null");
  } else {
    MethodData* md = mtd->final_profile();
    tty->print("mdo=");
    if (md == nullptr) {
      tty->print("null");
    } else {
      int mdo_invocations = md->invocation_count();
      int mdo_backedges = md->backedge_count();
      int mdo_invocations_start = md->invocation_count_start();
      int mdo_backedges_start = md->backedge_count_start();
      tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
    }
    CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
    tty->print(", deps=");
    if (ctd == nullptr) {
      tty->print("null");
    } else {
      tty->print("%d", ctd->init_deps_left());
    }
  }
}

// Print an event.
void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
  bool inlinee_event = m != im;

  ttyLocker tty_lock;
  tty->print("%lf: [", os::elapsedTime());

  switch(type) {
  case CALL:
    tty->print("call");
    break;
  case LOOP:
    tty->print("loop");
    break;
  case COMPILE:
    tty->print("compile");
    break;
  case FORCE_COMPILE:
    tty->print("force-compile");
    break;
  case FORCE_RECOMPILE:
    tty->print("force-recompile");
    break;
  case REMOVE_FROM_QUEUE:
    tty->print("remove-from-queue");
    break;
  case UPDATE_IN_QUEUE:
    tty->print("update-in-queue");
    break;
  case REPROFILE:
    tty->print("reprofile");
    break;
  case MAKE_NOT_ENTRANT:
    tty->print("make-not-entrant");
    break;
  default:
    tty->print("unknown");
  }

  tty->print(" level=%d ", level);

  ResourceMark rm;
  char *method_name = m->name_and_sig_as_C_string();
  tty->print("[%s", method_name);
  if (inlinee_event) {
    char *inlinee_name = im->name_and_sig_as_C_string();
    tty->print(" [%s]] ", inlinee_name);
  }
  else tty->print("] ");
  tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
                                      CompileBroker::queue_size(CompLevel_full_optimization));

  tty->print(" rate=");
  if (m->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", m->rate());

  RecompilationPolicy::print_load_average();

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

  if (type != COMPILE) {
    print_counters("", m);
    if (inlinee_event) {
      print_counters("inlinee ", im);
    }
    tty->print(" compilable=");
    bool need_comma = false;
    if (!m->is_not_compilable(CompLevel_full_profile)) {
      tty->print("c1");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
      if (need_comma) tty->print(",");
      tty->print("c1-osr");
      need_comma = true;
    }
    if (!m->is_not_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2-osr");
    }
    tty->print(" status=");
    if (m->queued_for_compilation()) {
      tty->print("in-queue");
    } else tty->print("idle");
    print_training_data("", m);
    if (inlinee_event) {
      print_training_data("inlinee ", im);
    }
  }
  tty->print_cr("]");
}

void CompilationPolicy::initialize() {
  if (!CompilerConfig::is_interpreter_only()) {
    int count = CICompilerCount;
    bool c1_only = CompilerConfig::is_c1_only();
    bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();

#ifdef _LP64
    // Turn on ergonomic compiler count selection
    if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
      FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
    }
    if (CICompilerCountPerCPU) {
      // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
      int log_cpu = log2i(os::active_processor_count());
      int loglog_cpu = log2i(MAX2(log_cpu, 1));
      count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
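      // E.g., with 8 active processors: log_cpu == 3, loglog_cpu == 1,
      // so count == MAX2(3 * 1 * 3 / 2, 2) == 4.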
      // Make sure there is enough space in the code cache to hold all the compiler buffers
      size_t c1_size = 0;
#ifdef COMPILER1
      c1_size = Compiler::code_buffer_size();
#endif
      size_t c2_size = 0;
#ifdef COMPILER2
      c2_size = C2Compiler::initial_code_buffer_size();
#endif
      size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
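      // The weighted average assumes roughly one third of the threads run C1
      // and two thirds run C2, matching the thread split chosen below.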
      int max_count = (ReservedCodeCacheSize - (int)CompilerConfig::min_code_cache_size()) / (int)buffer_size;
      if (count > max_count) {
        // Lower the compiler count such that all buffers fit into the code cache
        count = MAX2(max_count, c1_only ? 1 : 2);
      }
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#else
    // On 32-bit systems, the number of compiler threads is limited to 3.
    // On these systems, the virtual address space available to the JVM
    // is usually limited to 2-4 GB (the exact value depends on the platform).
    // As the compilers (especially C2) can consume a large amount of
    // memory, scaling the number of compiler threads with the number of
    // available cores can result in the exhaustion of the address space
    // available to the VM and thus cause the VM to crash.
    if (FLAG_IS_DEFAULT(CICompilerCount)) {
      count = 3;
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#endif

    if (c1_only) {
      // No C2 compiler thread required
      set_c1_count(count);
    } else if (c2_only) {
      set_c2_count(count);
    } else {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && UseJVMCINativeLibrary) {
        int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
        int c1_count = MAX2(count - libjvmci_count, 1);
        set_c2_count(libjvmci_count);
        set_c1_count(c1_count);
      } else if (SCCache::is_C3_on()) {
        set_c1_count(MAX2(count / 3, 1));
        set_c2_count(MAX2(count - c1_count(), 1));
        set_c3_count(1);
      } else
#endif
      {
        set_c1_count(MAX2(count / 3, 1));
        set_c2_count(MAX2(count - c1_count(), 1));
      }
    }
    if (SCCache::is_code_load_thread_on()) {
      set_sc_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
    }
    assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
    set_increase_threshold_at_ratio();
  }

  set_start_time(nanos_to_millis(os::javaTimeNanos()));
}

#ifdef ASSERT
bool CompilationPolicy::verify_level(CompLevel level) {
  if (TieredCompilation && level > TieredStopAtLevel) {
    return false;
  }
  // Check if there is a compiler to process the requested level
  if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
    return false;
  }
  if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
    return false;
  }

  // Interpreter level is always valid.
  if (level == CompLevel_none) {
    return true;
  }
  if (CompilationModeFlag::normal()) {
    return true;
  } else if (CompilationModeFlag::quick_only()) {
    return level == CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    return level == CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    return level == CompLevel_full_optimization || level == CompLevel_simple;
  }
  return false;
}
#endif

CompLevel CompilationPolicy::highest_compile_level() {
  CompLevel level = CompLevel_none;
  // Setup the maximum level available for the current compiler configuration.
  if (!CompilerConfig::is_interpreter_only()) {
    if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
      level = CompLevel_full_optimization;
    } else if (CompilerConfig::is_c1_enabled()) {
      if (CompilerConfig::is_c1_simple_only()) {
        level = CompLevel_simple;
      } else {
        level = CompLevel_full_profile;
      }
    }
  }
  // Clamp the maximum level with TieredStopAtLevel.
  if (TieredCompilation) {
    level = MIN2(level, (CompLevel) TieredStopAtLevel);
  }

  // Fix it up if after the clamping it has become invalid.
  // Bring it monotonically down depending on the next available level for
  // the compilation mode.
  if (!CompilationModeFlag::normal()) {
    // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
    // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
    // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
    if (CompilationModeFlag::quick_only()) {
      if (level == CompLevel_limited_profile || level == CompLevel_full_profile || level == CompLevel_full_optimization) {
        level = CompLevel_simple;
      }
    } else if (CompilationModeFlag::high_only()) {
      if (level == CompLevel_simple || level == CompLevel_limited_profile || level == CompLevel_full_profile) {
        level = CompLevel_none;
      }
    } else if (CompilationModeFlag::high_only_quick_internal()) {
      if (level == CompLevel_limited_profile || level == CompLevel_full_profile) {
        level = CompLevel_simple;
      }
    }
  }

  assert(verify_level(level), "Invalid highest compilation level: %d", level);
  return level;
}

CompLevel CompilationPolicy::limit_level(CompLevel level) {
  level = MIN2(level, highest_compile_level());
  assert(verify_level(level), "Invalid compilation level: %d", level);
  return level;
}

CompLevel CompilationPolicy::initial_compile_level(const methodHandle& method) {
  CompLevel level = CompLevel_any;
  if (CompilationModeFlag::normal()) {
    level = CompLevel_full_profile;
  } else if (CompilationModeFlag::quick_only()) {
    level = CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    level = CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    if (force_comp_at_level_simple(method)) {
      level = CompLevel_simple;
    } else {
      level = CompLevel_full_optimization;
    }
  }
  assert(level != CompLevel_any, "Unhandled compilation mode");
  return limit_level(level);
}

// Set carry flags on the counters if necessary
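// (The carry flag keeps an overflowing counter saturated, so the fact that the
// method has been hot is not lost if the counters are later decayed or reset.)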
void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
  MethodCounters *mcs = method->method_counters();
  if (mcs != nullptr) {
    mcs->invocation_counter()->set_carry_on_overflow();
    mcs->backedge_counter()->set_carry_on_overflow();
  }
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    mdo->invocation_counter()->set_carry_on_overflow();
    mdo->backedge_counter()->set_carry_on_overflow();
  }
}

// Called with the queue locked and with at least one element
CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
  CompileTask *max_blocking_task = nullptr;
  CompileTask *max_task = nullptr;
  Method* max_method = nullptr;

  int64_t t = nanos_to_millis(os::javaTimeNanos());
  // Iterate through the queue and find a method with the maximum rate.
  for (CompileTask* task = compile_queue->first(); task != nullptr;) {
    CompileTask* next_task = task->next();
    // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from the whitebox API don't become stale.
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    if (task->is_scc()) {
      // SCC tasks are on a separate queue and should load fast. There is no need to walk
      // the rest of the queue; just take the task and go.
      return task;
    }
    Method* method = task->method();
    methodHandle mh(THREAD, method);
    if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
      }
      method->clear_queued_for_compilation();
      method->set_pending_queue_processed(false);
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    update_rate(t, mh);
    if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
      // Select a method with the highest rate
      max_task = task;
      max_method = method;
    }

    if (task->is_blocking()) {
      if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != nullptr) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  methodHandle max_method_h(THREAD, max_method);

  if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
      max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
    max_task->set_comp_level(CompLevel_limited_profile);

    if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
                                               false /* requires_online_compilation */,
                                               CompileTask::Reason_None)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
      }
      compile_queue->remove_and_mark_stale(max_task);
      max_method->clear_queued_for_compilation();
      return nullptr;
    }

    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }
  return max_task;
}

void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != nullptr) {
      mdo->reset_start_counters();
    }
    if (sd->is_top()) break;
  }
}

nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
  }

#if INCLUDE_JVMCI
  if (EnableJVMCI && UseJVMCICompiler &&
      comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
    return nullptr;
  }
#endif

  if (comp_level == CompLevel_none &&
      JvmtiExport::can_post_interpreter_events() &&
      THREAD->is_interp_only_mode()) {
    return nullptr;
  }
  if (ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    return nullptr;
  }

  handle_counter_overflow(method);
  if (method() != inlinee()) {
    handle_counter_overflow(inlinee);
  }

  if (bci == InvocationEntryBci) {
    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
  } else {
    // method == inlinee if the event originated in the main method
    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
    // Check if event led to a higher level OSR compilation
    CompLevel expected_comp_level = MIN2(CompLevel_full_optimization, static_cast<CompLevel>(comp_level + 1));
    if (!CompilationModeFlag::disable_intermediate() && inlinee->is_not_osr_compilable(expected_comp_level)) {
      // It's not possible to reach the expected level, so fall back to simple.
      expected_comp_level = CompLevel_simple;
    }
    CompLevel max_osr_level = static_cast<CompLevel>(inlinee->highest_osr_comp_level());
    if (max_osr_level >= expected_comp_level) { // fast check to avoid locking in a typical scenario
      nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
      assert(osr_nm == nullptr || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
      if (osr_nm != nullptr && osr_nm->comp_level() != comp_level) {
        // Perform OSR with new nmethod
        return osr_nm;
      }
    }
  }
  return nullptr;
}

// Check if the method can be compiled, change level if necessary
void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
  assert(verify_level(level), "Invalid compilation level requested: %d", level);

  if (level == CompLevel_none) {
    if (mh->has_compiled_code()) {
      // Happens when we switch to interpreter to profile.
      MutexLocker ml(Compile_lock);
      NoSafepointVerifier nsv;
      if (mh->has_compiled_code()) {
        mh->code()->make_not_used();
      }
      // Deoptimize immediately (we don't have to wait for a compile).
      JavaThread* jt = THREAD;
      RegisterMap map(jt,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = jt->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(jt, fr.id());
    }
    return;
  }

  if (!CompilationModeFlag::disable_intermediate()) {
    // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
    // in the interpreter and then compile with C2 (the transition function will request that,
    // see common()). If the method cannot be compiled with C2 but still can with C1, compile it with
    // pure C1.
    if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
      if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
        compile(mh, bci, CompLevel_simple, THREAD);
      }
      return;
    }
    if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
      if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
        nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
        if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
          // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
          osr_nm->make_not_entrant();
        }
        compile(mh, bci, CompLevel_simple, THREAD);
      }
      return;
    }
  }
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh(), mh(), bci, level);
    }
    int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
    update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
    bool requires_online_compilation = false;
    if (TrainingData::have_data()) {
      MethodTrainingData* mtd = MethodTrainingData::find(mh);
      if (mtd != nullptr) {
        CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
        if (ctd != nullptr) {
          requires_online_compilation = (ctd->init_deps_left() > 0);
        }
      }
    }
    CompileBroker::compile_method(mh, bci, level, mh, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
  }
}

// update_rate() is called from select_task() while holding a compile queue lock.
void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
  // Skip the update if counters are absent.
  // Can't allocate them since we are holding the compile queue lock.
  if (method->method_counters() == nullptr)  return;

  if (is_old(method)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    method->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = method->invocation_count() + method->backedge_count();
  int delta_e = event_count - method->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      method->set_prev_time(t);
      method->set_prev_event_count(event_count);
      method->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        method->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  int64_t delta_t = t - method->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = method->invocation_count() + method->backedge_count();
    int delta_e = event_count - method->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool CompilationPolicy::is_old(const methodHandle& method) {
  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = TieredOldPercentage / 100.0;
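  // With the default TieredOldPercentage of 1000, k == 10, i.e. a method is
  // considered old once its counters pass ten times the tier-3 thresholds.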

  return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
}

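// Queue weight of a method: favors methods with a high event rate and large
// accumulated counts. Used by compare_methods() when select_task() picks the
// next task from the compile queue.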
double CompilationPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
}

// Apply heuristics and return true if x should be compiled before y
bool CompilationPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
  assert(!x->is_scc() && !y->is_scc(), "SC tasks are not expected here");
  if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
    return true;
  }
  return false;
}

// Is the method profiled enough?
bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
  }
  return false;
}

// Determine if a method is mature.
bool CompilationPolicy::is_mature(MethodData* mdo) {
  if (Arguments::is_compiler_only()) {
    // Always report profiles as immature with -Xcomp
    return false;
  }
  if (mdo != nullptr) {
    methodHandle mh(Thread::current(), mdo->method());
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();
    double k = ProfileMaturityPercentage / 100.0;
    return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);
  }
  return false;
}

// If a method is old enough and is still in the interpreter, we want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
  if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
    return false;
  }

  if (TrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find(method);
    if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
      return true;
    }
    return false;
  }

  if (is_old(method)) {
    return true;
  }

  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = Tier0ProfilingStartPercentage / 100.0;
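  // E.g., with the default Tier0ProfilingStartPercentage of 200, k == 2 and
  // profiling starts once the counters reach twice the tier-3 thresholds.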
1169 
1170   // If the top level compiler is not keeping up, delay profiling.
1171   if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
1172     return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
1173   }
1174   return false;
1175 }
1176 
1177 // Inlining control: if we're compiling a profiled method with C1 and the callee
1178 // is known to have OSRed in a C2 version, don't inline it.
1179 bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
1180   CompLevel comp_level = (CompLevel)env->comp_level();
1181   if (comp_level == CompLevel_full_profile ||
1182       comp_level == CompLevel_limited_profile) {
1183     return callee->highest_osr_comp_level() == CompLevel_full_optimization;
1184   }
1185   return false;
1186 }
1187 
1188 // Create MDO if necessary.
1189 void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
1190   if (mh->is_native() ||
1191       mh->is_abstract() ||
1192       mh->is_accessor() ||
1193       mh->is_constant_getter()) {
1194     return;
1195   }
1196   if (mh->method_data() == nullptr) {
1197     Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
1198   }
1199   if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
1200     MethodData* mdo = mh->method_data();
1201     if (mdo != nullptr) {
1202       frame last_frame = THREAD->last_frame();
1203       if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
1204         int bci = last_frame.interpreter_frame_bci();
1205         address dp = mdo->bci_to_dp(bci);
1206         last_frame.interpreter_frame_set_mdp(dp);
1207       }
1208     }
1209   }
1210 }
1211 
1212 CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1213   precond(mtd != nullptr);
1214   precond(cur_level == CompLevel_none);
1215 
1216   if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
1217     return CompLevel_none;
1218   }
1219 
1220   bool training_has_profile = (mtd->final_profile() != nullptr);
1221   if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
1222     return CompLevel_full_profile;
1223   }
1224 
1225   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
1226   switch (highest_training_level) {
1227     case CompLevel_limited_profile:
1228     case CompLevel_full_profile:
1229       return CompLevel_limited_profile;
1230     case CompLevel_simple:
1231       return CompLevel_simple;
1232     case CompLevel_none:
1233       return CompLevel_none;
1234     default:
1235       break;
1236   }
1237 
1238   // Now handle the case of level 4.
1239   assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
1240   if (!training_has_profile) {
1241     // The method was a part of a level 4 compile, but don't have a stored profile,
1242     // we need to profile it.
1243     return CompLevel_full_profile;
1244   }
1245   const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
1246   // If we deopted, then we reprofile
1247   if (deopt && !is_method_profiled(method)) {
1248     return CompLevel_full_profile;
1249   }
1250 
1251   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1252   assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
1253   // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
1254   if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
1255     if (method->method_data() == nullptr) {
1256       create_mdo(method, THREAD);
1257     }
1258     return CompLevel_full_optimization;
1259   }
1260 
1261   // Otherwise go to level 2
1262   return CompLevel_limited_profile;
1263 }
1264 
1265 
1266 CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1267   precond(mtd != nullptr);
1268   precond(cur_level == CompLevel_limited_profile);
1269 
1270   // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.
1271 
1272   // But first, check if we have a saved profile
1273   bool training_has_profile = (mtd->final_profile() != nullptr);
1274   if (!training_has_profile) {
1275     return CompLevel_full_profile;
1276   }
1277 
1278 
1279   assert(training_has_profile, "Have to have a profile to be here");
1280   // Check if the method is ready
1281   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1282   if (ctd != nullptr && ctd->init_deps_left() == 0) {
1283     if (method->method_data() == nullptr) {
1284       create_mdo(method, THREAD);
1285     }
1286     return CompLevel_full_optimization;
1287   }
1288 
1289   // Otherwise stay at the current level
1290   return CompLevel_limited_profile;
1291 }
1292 
1293 
1294 CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1295   precond(mtd != nullptr);
1296   precond(cur_level == CompLevel_full_profile);
1297 
1298   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
1299   // We have method at the full profile level and we also know that it's possibly an important method.
1300   if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
1301     // Check if it is adequately profiled
1302     if (is_method_profiled(method)) {
1303       return CompLevel_full_optimization;
1304     }
1305   }
1306 
1307   // Otherwise stay at the current level
1308   return CompLevel_full_profile;
1309 }
1310 
1311 CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1312   precond(MethodTrainingData::have_data());
1313 
1314   // If there is no training data recorded for this method, bail out.
1315   if (mtd == nullptr) {
1316     return cur_level;
1317   }
1318 
1319   CompLevel next_level = cur_level;
1320   switch(cur_level) {
1321     default: break;
1322     case CompLevel_none:
1323       next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
1324       break;
1325     case CompLevel_limited_profile:
1326       next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
1327       break;
1328     case CompLevel_full_profile:
1329       next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
1330       break;
1331   }
1332 
1333   // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
1334   if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
1335     return CompLevel_none;
1336   }
1337   if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
1338     return CompLevel_none;
1339   }
1340   return (cur_level != next_level) ? limit_level(next_level) : cur_level;
1341 }
1342 
/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 or Graal (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition
 *    to level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2 while the request is still in-queue, because that allows it to run much
 *    faster without full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method has been compiled with C1 once, it can be identified as trivial and compiled at
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can be with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should transition to another level.
template<typename Predicate>
CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
  CompLevel next_level = cur_level;

  if (force_comp_at_level_simple(method)) {
    next_level = CompLevel_simple;
  } else {
    if (MethodTrainingData::have_data()) {
      MethodTrainingData* mtd = MethodTrainingData::find(method);
      if (mtd == nullptr) {
        // We haven't seen any compilations of this method during training. It's either very cold
        // or its behavior has changed. Feed it to the standard TF with no profiling delay.
        next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
      } else {
        next_level = trained_transition(method, cur_level, mtd, THREAD);
        if (cur_level == next_level) {
          // trained_transition() is going to return the same level if no startup/warmup optimizations apply.
          // In order to catch possible pathologies due to behavior change we feed the event to the regular
          // TF but with profiling delay.
          next_level = standard_transition<Predicate>(method, cur_level, true /*delay_profiling*/, disable_feedback);
        }
      }
    } else if (is_trivial(method) || method->is_native()) {
      next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
    } else {
      next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
    }
  }
  return (next_level != cur_level) ? limit_level(next_level) : next_level;
}

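// The regular (non-trained) transition function: dispatch on the current level.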
template<typename Predicate>
CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  CompLevel next_level = cur_level;
  switch(cur_level) {
  default: break;
  case CompLevel_none:
    next_level = transition_from_none<Predicate>(method, cur_level, delay_profiling, disable_feedback);
    break;
  case CompLevel_limited_profile:
    next_level = transition_from_limited_profile<Predicate>(method, cur_level, delay_profiling, disable_feedback);
    break;
  case CompLevel_full_profile:
    next_level = transition_from_full_profile<Predicate>(method, cur_level);
    break;
  }
  return next_level;
}

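// Transition out of the interpreter (level 0): either jump straight to full optimization if a
// fully profiled version would already qualify, or start one of the C1 profiling levels.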
template<typename Predicate>
CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  precond(cur_level == CompLevel_none);
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();
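  // With delayed profiling the predicate thresholds are scaled by Tier0ProfileDelayFactor,
  // postponing the transition out of the interpreter.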
  double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0;
  // If we were at the full profile level, would we switch to full opt?
  if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
    next_level = CompLevel_full_optimization;
  } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) {
    // C1-generated fully profiled code is about 30% slower than the limited-profile
    // code that has only invocation and backedge counters. The observation is that
    // if the C2 queue is large enough we can spend too much time in the fully profiled code
    // while waiting for C2 to pick the method from the queue. To alleviate this problem
    // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
    // we choose to compile a limited-profile version and then recompile with full profiling
    // when the load on C2 goes down.
    if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) {
      next_level = CompLevel_limited_profile;
    } else {
      next_level = CompLevel_full_profile;
    }
  }
  return next_level;
}

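// Transition from level 3 (full profile): promote to full optimization once the MDO counter
// deltas satisfy the predicate, or immediately if no further profiling would happen anyway.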
template<typename Predicate>
CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
  precond(cur_level == CompLevel_full_profile);
  CompLevel next_level = cur_level;
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
      int mdo_i = mdo->invocation_count_delta();
      int mdo_b = mdo->backedge_count_delta();
      if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
        next_level = CompLevel_full_optimization;
      }
    } else {
      next_level = CompLevel_full_optimization;
    }
  }
  return next_level;
}

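// Transition from level 2 (limited profile): move up to level 3 for full profiling once the
// counters qualify (subject to the C2 queue-length feedback), or go straight to level 4 if
// the method is already sufficiently profiled.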
template<typename Predicate>
CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  precond(cur_level == CompLevel_limited_profile);
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();
  double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0;
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    if (mdo->would_profile()) {
      if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                              Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                              Predicate::apply_scaled(method, cur_level, i, b, scale))) {
        next_level = CompLevel_full_profile;
      }
    } else {
      next_level = CompLevel_full_optimization;
    }
  } else {
    // There is no MDO yet, so we need to profile first.
    if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                            Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                            Predicate::apply_scaled(method, cur_level, i, b, scale))) {
      next_level = CompLevel_full_profile;
    }
  }
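  // If the method is already adequately profiled, skip the full-profile tier and go
  // straight to full optimization.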
  if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
    next_level = CompLevel_full_optimization;
  }
  return next_level;
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
  CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));

  // If the OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != nullptr, "MDO should not be nullptr");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
#if INCLUDE_JVMCI
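  // Defer level 4 (JVMCI) compilation requests until AOT-linked class preloading has finished.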
  if (EnableJVMCI && UseJVMCICompiler &&
      next_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
    next_level = cur_level;
  }
#endif
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
  CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, that means we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Handle the invocation event.
void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                CompLevel level, nmethod* nm, TRAPS) {
  if (should_create_mdo(mh, level)) {
    create_mdo(mh, THREAD);
  }
  CompLevel next_level = call_event(mh, level, THREAD);
  if (next_level != level) {
    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
      compile(mh, InvocationEntryBci, next_level, THREAD);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                 int bci, CompLevel level, nmethod* nm, TRAPS) {
  if (should_create_mdo(mh, level)) {
    create_mdo(mh, THREAD);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh, level)) {
    create_mdo(imh, THREAD);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh, level, THREAD);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, CHECK);
    }

    // Use the loop event as an opportunity to also check if there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != nullptr, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh, cur_level, THREAD);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an OSR method, just make it not entrant and recompile later if needed
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      // Fix up next_level if necessary to avoid deopts
      if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
        next_level = CompLevel_full_profile;
      }
      if (cur_level != next_level) {
        if (!CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, THREAD);
        }
      }
    } else {
      cur_level = comp_level(mh());
      next_level = call_event(mh, cur_level, THREAD);
      if (next_level != cur_level) {
        if (!CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, THREAD);
        }
      }
    }
  }
}
