1 /*
   2  * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotLinkedClassBulkLoader.hpp"
  26 #include "code/SCCache.hpp"
  27 #include "code/scopeDesc.hpp"
  28 #include "compiler/compilationPolicy.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compilerDefinitions.inline.hpp"
  31 #include "compiler/compilerOracle.hpp"
  32 #include "compiler/recompilationPolicy.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/method.inline.hpp"
  35 #include "oops/methodData.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "oops/trainingData.hpp"
  38 #include "prims/jvmtiExport.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/frame.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/globals_extension.hpp"
  44 #include "runtime/handles.inline.hpp"
  45 #include "runtime/safepoint.hpp"
  46 #include "runtime/safepointVerifiers.hpp"
  47 #ifdef COMPILER1
  48 #include "c1/c1_Compiler.hpp"
  49 #endif
  50 #ifdef COMPILER2
  51 #include "opto/c2compiler.hpp"
  52 #endif
  53 #if INCLUDE_JVMCI
  54 #include "jvmci/jvmci.hpp"
  55 #endif
  56 
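     // Compiler thread counts and related policy state; set up in CompilationPolicy::initialize().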
  57 int64_t CompilationPolicy::_start_time = 0;
  58 int CompilationPolicy::_c1_count = 0;
  59 int CompilationPolicy::_c2_count = 0;
  60 int CompilationPolicy::_c3_count = 0;
  61 int CompilationPolicy::_sc_count = 0;
  62 double CompilationPolicy::_increase_threshold_at_ratio = 0;
  63 
  64 CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;
  65 
  66 void compilationPolicy_init() {
  67   CompilationPolicy::initialize();
  68 }
  69 
  70 int CompilationPolicy::compiler_count(CompLevel comp_level) {
  71   if (is_c1_compile(comp_level)) {
  72     return c1_count();
  73   } else if (is_c2_compile(comp_level)) {
  74     return c2_count();
  75   }
  76   return 0;
  77 }
  78 
  79 // Returns true if m must be compiled before executing it
  80 // This is intended to force compiles for methods (usually for
  81 // debugging) that would otherwise be interpreted for some reason.
  82 bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  83   // Don't allow Xcomp to cause compiles in replay mode
  84   if (ReplayCompiles) return false;
  85 
  86   if (m->has_compiled_code()) return false;       // already compiled
  87   if (!can_be_compiled(m, comp_level)) return false;
  88 
  89   return !UseInterpreter ||                                                                        // must compile all methods
  90          (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
  91 }
  92 
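     // Eagerly compile a method according to its recorded training data (does nothing if no training data was recorded for it).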
  93 void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
  94   if (m->method_holder()->is_not_initialized()) {
  95     // 'is_not_initialized' means not only '!is_initialized', but also that
  96     // initialization has not been started yet ('!being_initialized')
  97     // Do not force compilation of methods in uninitialized classes.
  98     return;
  99   }
 100   if (!m->is_native() && MethodTrainingData::have_data()) {
 101     MethodTrainingData* mtd = MethodTrainingData::find_fast(m);
 102     if (mtd == nullptr) {
 103       return;              // there is no training data recorded for m
 104     }
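         // Code that still carries class-initialization barriers is recompiled even if the target level does not change.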
 105     bool recompile = m->code_has_clinit_barriers();
 106     CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
 107     CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
 108     if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
 109       bool requires_online_compilation = false;
 110       CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
 111       if (ctd != nullptr) {
 112         requires_online_compilation = (ctd->init_deps_left() > 0);
 113       }
 114       if (requires_online_compilation && recompile) {
 115         return;
 116       }
 117       if (PrintTieredEvents) {
 118         print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
 119       }
 120       CompileBroker::compile_method(m, InvocationEntryBci, next_level, methodHandle(), 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
 121       if (HAS_PENDING_EXCEPTION) {
 122         CLEAR_PENDING_EXCEPTION;
 123       }
 124     }
 125   }
 126 }
 127 
 128 void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
 129   assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
 130   maybe_compile_early(m, THREAD);
 131 }
 132 
 133 void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
 134   if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
 135     // don't force compilation, resolve was on behalf of compiler
 136     return;
 137   }
 138   if (m->method_holder()->is_not_initialized()) {
 139     // 'is_not_initialized' means not only '!is_initialized', but also that
 140     // initialization has not been started yet ('!being_initialized')
 141     // Do not force compilation of methods in uninitialized classes.
 142     // Note that doing so would trip an assert later,
 143     // in CompileBroker::compile_method.
 144     // We sometimes use the link resolver to do reflective lookups
 145     // even before classes are initialized.
 146     return;
 147   }
 148 
 149   if (must_be_compiled(m)) {
 150     // This path is unusual, mostly used by the '-Xcomp' stress test mode.
 151     CompLevel level = initial_compile_level(m);
 152     if (PrintTieredEvents) {
 153       print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
 154     }
 155     CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
 156   }
 157 }
 158 
 159 void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
 160   if (!klass->has_init_deps_processed()) {
 161     ResourceMark rm;
 162     log_debug(training)("Replay training: %s", klass->external_name());
 163 
 164     KlassTrainingData* ktd = KlassTrainingData::find(klass);
 165     if (ktd != nullptr) {
 166       guarantee(ktd->has_holder(), "");
 167       ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
 168       assert(klass->has_init_deps_processed(), "");
 169 
 170       ktd->iterate_comp_deps([&](CompileTrainingData* ctd) {
 171         if (ctd->init_deps_left() == 0) {
 172           MethodTrainingData* mtd = ctd->method();
 173           if (mtd->has_holder()) {
 174             const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
 175             CompilationPolicy::maybe_compile_early(mh, THREAD);
 176           }
 177         }
 178       });
 179     }
 180   }
 181 }
 182 
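     // Block until the training replay queue has been drained (by the thread running replay_training_at_init_loop).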
 183 void CompilationPolicy::flush_replay_training_at_init(TRAPS) {
 184    MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
 185    while (!_training_replay_queue.is_empty_unlocked()) {
 186      locker.wait(); // let the replay training thread drain the queue
 187    }
 188 }
 189 
 190 void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
 191   assert(klass->is_initialized(), "");
 192   if (TrainingData::have_data() && klass->is_shared()) {
 193     _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
 194   }
 195 }
 196 
 197 // For TrainingReplayQueue
 198 template<>
 199 void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
 200   int pos = 0;
 201   for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
 202     ResourceMark rm;
 203     InstanceKlass* ik = cur->value();
 204     st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
 205   }
 206 }
 207 
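     // Drain the training replay queue: pop classes and replay their training data until compilation is disabled forever (AOTVerifyTrainingData keeps the loop running regardless).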
 208 void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
 209   while (!CompileBroker::is_compilation_disabled_forever() || AOTVerifyTrainingData) {
 210     InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
 211     replay_training_at_init_impl(ik, THREAD);
 212   }
 213 }
 214 
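     // For compilability queries, map CompLevel_any to the concrete level implied by a C1-only or C2/JVMCI-only compiler configuration.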
 215 static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
 216   if (comp_level == CompLevel_any) {
 217      if (CompilerConfig::is_c1_only()) {
 218        comp_level = CompLevel_simple;
 219      } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
 220        comp_level = CompLevel_full_optimization;
 221      }
 222   }
 223   return comp_level;
 224 }
 225 
 226 // Returns true if m is allowed to be compiled
 227 bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
 228   // allow any levels for WhiteBox
 229   assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);
 230 
 231   if (m->is_abstract()) return false;
 232   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
 233 
 234   // Math intrinsics should never be compiled as this can lead to
 235   // monotonicity problems because the interpreter will prefer the
 236   // compiled code to the intrinsic version.  This can't happen in
 237   // production because the invocation counter can't be incremented,
 238   // but we shouldn't expose the system to this problem in testing
 239   // modes.
 240   if (!AbstractInterpreter::can_be_compiled(m)) {
 241     return false;
 242   }
 243   comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
 244   if (comp_level == CompLevel_any || is_compile(comp_level)) {
 245     return !m->is_not_compilable(comp_level);
 246   }
 247   return false;
 248 }
 249 
 250 // Returns true if m is allowed to be osr compiled
 251 bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
 252   bool result = false;
 253   comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
 254   if (comp_level == CompLevel_any || is_compile(comp_level)) {
 255     result = !m->is_not_osr_compilable(comp_level);
 256   }
 257   return (result && can_be_compiled(m, comp_level));
 258 }
 259 
 260 bool CompilationPolicy::is_compilation_enabled() {
 261   // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
 262   return CompileBroker::should_compile_new_jobs();
 263 }
 264 
 265 CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
 266   // Remove unloaded methods from the queue
 267   for (CompileTask* task = compile_queue->first(); task != nullptr; ) {
 268     CompileTask* next = task->next();
 269     if (task->is_unloaded()) {
 270       compile_queue->remove_and_mark_stale(task);
 271     }
 272     task = next;
 273   }
 274 #if INCLUDE_JVMCI
 275   if (UseJVMCICompiler && !BackgroundCompilation) {
 276     /*
 277      * In blocking compilation mode, the CompileBroker will make
 278      * compilations submitted by a JVMCI compiler thread non-blocking. These
 279      * compilations should be scheduled after all blocking compilations
 280      * to service non-compiler related compilations sooner and reduce the
 281      * chance of such compilations timing out.
 282      */
 283     for (CompileTask* task = compile_queue->first(); task != nullptr; task = task->next()) {
 284       if (task->is_blocking()) {
 285         return task;
 286       }
 287     }
 288   }
 289 #endif
 290   return compile_queue->first();
 291 }
 292 
 293 // Simple methods do just as well compiled with C1 as with C2.
 294 // Determine if a given method is such a case.
 295 bool CompilationPolicy::is_trivial(const methodHandle& method) {
 296   if (method->is_accessor() ||
 297       method->is_constant_getter()) {
 298     return true;
 299   }
 300   return false;
 301 }
 302 
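     // In the quick_internal compilation mode, let the JVMCI compiler decide whether this method must stay at level 1 (not applied when the C3 compiler is on).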
 303 bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
 304   if (CompilationModeFlag::quick_internal()) {
 305 #if INCLUDE_JVMCI
 306     if (UseJVMCICompiler) {
 307       AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
 308       if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
 309         return !SCCache::is_C3_on();
 310       }
 311     }
 312 #endif
 313   }
 314   return false;
 315 }
 316 
 317 CompLevel CompilationPolicy::comp_level(Method* method) {
 318   nmethod *nm = method->code();
 319   if (nm != nullptr && nm->is_in_use()) {
 320     return (CompLevel)nm->comp_level();
 321   }
 322   return CompLevel_none;
 323 }
 324 
 325 // Call and loop predicates determine whether a transition to a higher
 326 // compilation level should be performed (pointers to predicate functions
 327 // are passed to common()).
 328 // Tier?LoadFeedback is basically a coefficient that determines
 329 // how many methods per compiler thread can be in the queue before
 330 // the threshold values double.
 331 class LoopPredicate : AllStatic {
 332 public:
 333   static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
 334     double threshold_scaling;
 335     if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
 336       scale *= threshold_scaling;
 337     }
 338     switch(cur_level) {
 339     case CompLevel_none:
 340     case CompLevel_limited_profile:
 341       return b >= Tier3BackEdgeThreshold * scale;
 342     case CompLevel_full_profile:
 343       return b >= Tier4BackEdgeThreshold * scale;
 344     default:
 345       return true;
 346     }
 347   }
 348 
 349   static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
 350     double k = 1;
 351     switch(cur_level) {
 352     case CompLevel_none:
 353     // Fall through
 354     case CompLevel_limited_profile: {
 355       k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
 356       break;
 357     }
 358     case CompLevel_full_profile: {
 359       k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
 360       break;
 361     }
 362     default:
 363       return true;
 364     }
 365     return apply_scaled(method, cur_level, i, b, k);
 366   }
 367 };
 368 
 369 class CallPredicate : AllStatic {
 370 public:
 371   static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
 372     double threshold_scaling;
 373     if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
 374       scale *= threshold_scaling;
 375     }
 376     switch(cur_level) {
 377     case CompLevel_none:
 378     case CompLevel_limited_profile:
 379       return (i >= Tier3InvocationThreshold * scale) ||
 380              (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
 381     case CompLevel_full_profile:
 382       return (i >= Tier4InvocationThreshold * scale) ||
 383              (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
 384     default:
 385      return true;
 386     }
 387   }
 388 
 389   static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
 390     double k = 1;
 391     switch(cur_level) {
 392     case CompLevel_none:
 393     case CompLevel_limited_profile: {
 394       k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
 395       break;
 396     }
 397     case CompLevel_full_profile: {
 398       k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
 399       break;
 400     }
 401     default:
 402       return true;
 403     }
 404     return apply_scaled(method, cur_level, i, b, k);
 405   }
 406 };
 407 
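     // Scaling factor for the compile thresholds at the given level: k = queue_size / (feedback_k * compiler_count) + 1, so thresholds grow as the compile queue backs up relative to the number of compiler threads.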
 408 double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
 409   int comp_count = compiler_count(level);
 410   if (comp_count > 0) {
 411     double queue_size = CompileBroker::queue_size(level);
 412     double k = (double)queue_size / ((double)feedback_k * (double)comp_count) + 1;
 413 
 414     // Increase C1 compile threshold when the code cache is filled more
 415     // than specified by IncreaseFirstTierCompileThresholdAt percentage.
 416     // The main intention is to keep enough free space for C2 compiled code
 417     // to achieve peak performance if the code cache is under stress.
 418     if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level))  {
 419       double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
 420       if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
 421         k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
 422       }
 423     }
 424     return k;
 425   }
 426   return 1;
 427 }
 428 
 429 void CompilationPolicy::print_counters(const char* prefix, Method* m) {
 430   int invocation_count = m->invocation_count();
 431   int backedge_count = m->backedge_count();
 432   MethodData* mdh = m->method_data();
 433   int mdo_invocations = 0, mdo_backedges = 0;
 434   int mdo_invocations_start = 0, mdo_backedges_start = 0;
 435   if (mdh != nullptr) {
 436     mdo_invocations = mdh->invocation_count();
 437     mdo_backedges = mdh->backedge_count();
 438     mdo_invocations_start = mdh->invocation_count_start();
 439     mdo_backedges_start = mdh->backedge_count_start();
 440   }
 441   tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
 442       invocation_count, backedge_count, prefix,
 443       mdo_invocations, mdo_invocations_start,
 444       mdo_backedges, mdo_backedges_start);
 445   tty->print(" %smax levels=%d,%d", prefix,
 446       m->highest_comp_level(), m->highest_osr_comp_level());
 447 }
 448 
 449 void CompilationPolicy::print_training_data(const char* prefix, Method* method) {
 450   methodHandle m(Thread::current(), method);
 451   tty->print(" %smtd: ", prefix);
 452   MethodTrainingData* mtd = MethodTrainingData::find(m);
 453   if (mtd == nullptr) {
 454     tty->print("null");
 455   } else {
 456     MethodData* md = mtd->final_profile();
 457     tty->print("mdo=");
 458     if (md == nullptr) {
 459       tty->print("null");
 460     } else {
 461       int mdo_invocations = md->invocation_count();
 462       int mdo_backedges = md->backedge_count();
 463       int mdo_invocations_start = md->invocation_count_start();
 464       int mdo_backedges_start = md->backedge_count_start();
 465       tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
 466     }
 467     CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
 468     tty->print(", deps=");
 469     if (ctd == nullptr) {
 470       tty->print("null");
 471     } else {
 472       tty->print("%d", ctd->init_deps_left());
 473     }
 474   }
 475 }
 476 
 477 // Print an event.
 478 void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
 479   bool inlinee_event = m != im;
 480 
 481   ttyLocker tty_lock;
 482   tty->print("%lf: [", os::elapsedTime());
 483 
 484   switch(type) {
 485   case CALL:
 486     tty->print("call");
 487     break;
 488   case LOOP:
 489     tty->print("loop");
 490     break;
 491   case COMPILE:
 492     tty->print("compile");
 493     break;
 494   case FORCE_COMPILE:
 495     tty->print("force-compile");
 496     break;
 497   case FORCE_RECOMPILE:
 498     tty->print("force-recompile");
 499     break;
 500   case REMOVE_FROM_QUEUE:
 501     tty->print("remove-from-queue");
 502     break;
 503   case UPDATE_IN_QUEUE:
 504     tty->print("update-in-queue");
 505     break;
 506   case REPROFILE:
 507     tty->print("reprofile");
 508     break;
 509   case MAKE_NOT_ENTRANT:
 510     tty->print("make-not-entrant");
 511     break;
 512   default:
 513     tty->print("unknown");
 514   }
 515 
 516   tty->print(" level=%d ", level);
 517 
 518   ResourceMark rm;
 519   char *method_name = m->name_and_sig_as_C_string();
 520   tty->print("[%s", method_name);
 521   if (inlinee_event) {
 522     char *inlinee_name = im->name_and_sig_as_C_string();
 523     tty->print(" [%s]] ", inlinee_name);
 524   }
 525   else tty->print("] ");
 526   tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
 527                                       CompileBroker::queue_size(CompLevel_full_optimization));
 528 
 529   tty->print(" rate=");
 530   if (m->prev_time() == 0) tty->print("n/a");
 531   else tty->print("%f", m->rate());
 532 
 533   RecompilationPolicy::print_load_average();
 534 
 535   tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
 536                                threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
 537 
 538   if (type != COMPILE) {
 539     print_counters("", m);
 540     if (inlinee_event) {
 541       print_counters("inlinee ", im);
 542     }
 543     tty->print(" compilable=");
 544     bool need_comma = false;
 545     if (!m->is_not_compilable(CompLevel_full_profile)) {
 546       tty->print("c1");
 547       need_comma = true;
 548     }
 549     if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
 550       if (need_comma) tty->print(",");
 551       tty->print("c1-osr");
 552       need_comma = true;
 553     }
 554     if (!m->is_not_compilable(CompLevel_full_optimization)) {
 555       if (need_comma) tty->print(",");
 556       tty->print("c2");
 557       need_comma = true;
 558     }
 559     if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
 560       if (need_comma) tty->print(",");
 561       tty->print("c2-osr");
 562     }
 563     tty->print(" status=");
 564     if (m->queued_for_compilation()) {
 565       tty->print("in-queue");
 566     } else tty->print("idle");
 567     print_training_data("", m);
 568     if (inlinee_event) {
 569       print_training_data("inlinee ", im);
 570     }
 571   }
 572   tty->print_cr("]");
 573 }
 574 
 575 void CompilationPolicy::initialize() {
 576   if (!CompilerConfig::is_interpreter_only()) {
 577     if (StoreCachedCode) {
 578       // The assembly phase runs C1 and C2 compilations separately,
 579       // and can use all the CPU threads it can reach. Adjust the common
 580       // options before the policy starts overwriting them. There is a block
 581       // at the very end that overrides final thread counts.
 582       if (FLAG_IS_DEFAULT(UseDynamicNumberOfCompilerThreads)) {
 583         FLAG_SET_ERGO(UseDynamicNumberOfCompilerThreads, false);
 584       }
 585       if (FLAG_IS_DEFAULT(CICompilerCount)) {
 586         FLAG_SET_ERGO(CICompilerCount, MAX2(2, os::active_processor_count()));
 587       }
 588     }
 589     int count = CICompilerCount;
 590     bool c1_only = CompilerConfig::is_c1_only();
 591     bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();
 592 
 593 #ifdef _LP64
 594     // Turn on ergonomic compiler count selection
 595     if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
 596       FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
 597     }
 598     if (CICompilerCountPerCPU) {
 599       // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
 600       int log_cpu = log2i(os::active_processor_count());
 601       int loglog_cpu = log2i(MAX2(log_cpu, 1));
 602       count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
 603       // Make sure there is enough space in the code cache to hold all the compiler buffers
 604       size_t c1_size = 0;
 605 #ifdef COMPILER1
 606       c1_size = Compiler::code_buffer_size();
 607 #endif
 608       size_t c2_size = 0;
 609 #ifdef COMPILER2
 610       c2_size = C2Compiler::initial_code_buffer_size();
 611 #endif
 612       size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
 613       int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
 614       if (count > max_count) {
 615         // Lower the compiler count such that all buffers fit into the code cache
 616         count = MAX2(max_count, c1_only ? 1 : 2);
 617       }
 618       FLAG_SET_ERGO(CICompilerCount, count);
 619     }
 620 #else
 621     // On 32-bit systems, the number of compiler threads is limited to 3.
 622     // On these systems, the virtual address space available to the JVM
 623     // is usually limited to 2-4 GB (the exact value depends on the platform).
 624     // As the compilers (especially C2) can consume a large amount of
 625     // memory, scaling the number of compiler threads with the number of
 626     // available cores can result in the exhaustion of the address space
 627     // available to the VM and thus cause the VM to crash.
 628     if (FLAG_IS_DEFAULT(CICompilerCount)) {
 629       count = 3;
 630       FLAG_SET_ERGO(CICompilerCount, count);
 631     }
 632 #endif
 633 
 634     if (c1_only) {
 635       // No C2 compiler thread required
 636       set_c1_count(count);
 637     } else if (c2_only) {
 638       set_c2_count(count);
 639     } else {
 640 #if INCLUDE_JVMCI
 641       if (UseJVMCICompiler && UseJVMCINativeLibrary) {
 642         int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
 643         int c1_count = MAX2(count - libjvmci_count, 1);
 644         set_c2_count(libjvmci_count);
 645         set_c1_count(c1_count);
 646       } else if (SCCache::is_C3_on()) {
 647         set_c1_count(MAX2(count / 3, 1));
 648         set_c2_count(MAX2(count - c1_count(), 1));
 649         set_c3_count(1);
 650       } else
 651 #endif
 652       {
 653         set_c1_count(MAX2(count / 3, 1));
 654         set_c2_count(MAX2(count - c1_count(), 1));
 655       }
 656     }
 657     if (StoreCachedCode) {
 658       set_c1_count(count);
 659       set_c2_count(count);
 660       count *= 2; // satisfy the assert below
 661     }
 662     if (SCCache::is_code_load_thread_on()) {
 663       set_sc_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
 664     }
 665     assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
 666     set_increase_threshold_at_ratio();
 667   }
 668 
 669   set_start_time(nanos_to_millis(os::javaTimeNanos()));
 670 }
 671 
 672 
 673 
 674 
 675 #ifdef ASSERT
 676 bool CompilationPolicy::verify_level(CompLevel level) {
 677   if (TieredCompilation && level > TieredStopAtLevel) {
 678     return false;
 679   }
 680   // Check if there is a compiler to process the requested level
 681   if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
 682     return false;
 683   }
 684   if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
 685     return false;
 686   }
 687 
 688   // Interpreter level is always valid.
 689   if (level == CompLevel_none) {
 690     return true;
 691   }
 692   if (CompilationModeFlag::normal()) {
 693     return true;
 694   } else if (CompilationModeFlag::quick_only()) {
 695     return level == CompLevel_simple;
 696   } else if (CompilationModeFlag::high_only()) {
 697     return level == CompLevel_full_optimization;
 698   } else if (CompilationModeFlag::high_only_quick_internal()) {
 699     return level == CompLevel_full_optimization || level == CompLevel_simple;
 700   }
 701   return false;
 702 }
 703 #endif
 704 
 705 
 706 CompLevel CompilationPolicy::highest_compile_level() {
 707   CompLevel level = CompLevel_none;
 708   // Setup the maximum level available for the current compiler configuration.
 709   if (!CompilerConfig::is_interpreter_only()) {
 710     if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
 711       level = CompLevel_full_optimization;
 712     } else if (CompilerConfig::is_c1_enabled()) {
 713       if (CompilerConfig::is_c1_simple_only()) {
 714         level = CompLevel_simple;
 715       } else {
 716         level = CompLevel_full_profile;
 717       }
 718     }
 719   }
 720   // Clamp the maximum level with TieredStopAtLevel.
 721   if (TieredCompilation) {
 722     level = MIN2(level, (CompLevel) TieredStopAtLevel);
 723   }
 724 
 725   // Fix it up if after the clamping it has become invalid.
 726   // Bring it monotonically down depending on the next available level for
 727   // the compilation mode.
 728   if (!CompilationModeFlag::normal()) {
 729     // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
 730     // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
 731     // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
 732     if (CompilationModeFlag::quick_only()) {
 733       if (level == CompLevel_limited_profile || level == CompLevel_full_profile || level == CompLevel_full_optimization) {
 734         level = CompLevel_simple;
 735       }
 736     } else if (CompilationModeFlag::high_only()) {
 737       if (level == CompLevel_simple || level == CompLevel_limited_profile || level == CompLevel_full_profile) {
 738         level = CompLevel_none;
 739       }
 740     } else if (CompilationModeFlag::high_only_quick_internal()) {
 741       if (level == CompLevel_limited_profile || level == CompLevel_full_profile) {
 742         level = CompLevel_simple;
 743       }
 744     }
 745   }
 746 
 747   assert(verify_level(level), "Invalid highest compilation level: %d", level);
 748   return level;
 749 }
 750 
 751 CompLevel CompilationPolicy::limit_level(CompLevel level) {
 752   level = MIN2(level, highest_compile_level());
 753   assert(verify_level(level), "Invalid compilation level: %d", level);
 754   return level;
 755 }
 756 
 757 CompLevel CompilationPolicy::initial_compile_level(const methodHandle& method) {
 758   CompLevel level = CompLevel_any;
 759   if (CompilationModeFlag::normal()) {
 760     level = CompLevel_full_profile;
 761   } else if (CompilationModeFlag::quick_only()) {
 762     level = CompLevel_simple;
 763   } else if (CompilationModeFlag::high_only()) {
 764     level = CompLevel_full_optimization;
 765   } else if (CompilationModeFlag::high_only_quick_internal()) {
 766     if (force_comp_at_level_simple(method)) {
 767       level = CompLevel_simple;
 768     } else {
 769       level = CompLevel_full_optimization;
 770     }
 771   }
 772   assert(level != CompLevel_any, "Unhandled compilation mode");
 773   return limit_level(level);
 774 }
 775 
 776 // Set carry flags on the counters if necessary
 777 void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
 778   MethodCounters *mcs = method->method_counters();
 779   if (mcs != nullptr) {
 780     mcs->invocation_counter()->set_carry_on_overflow();
 781     mcs->backedge_counter()->set_carry_on_overflow();
 782   }
 783   MethodData* mdo = method->method_data();
 784   if (mdo != nullptr) {
 785     mdo->invocation_counter()->set_carry_on_overflow();
 786     mdo->backedge_counter()->set_carry_on_overflow();
 787   }
 788 }
 789 
 790 // Called with the queue locked and with at least one element
 791 CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
 792   CompileTask *max_blocking_task = nullptr;
 793   CompileTask *max_task = nullptr;
 794   Method* max_method = nullptr;
 795 
 796   int64_t t = nanos_to_millis(os::javaTimeNanos());
 797   // Iterate through the queue and find a method with a maximum rate.
 798   for (CompileTask* task = compile_queue->first(); task != nullptr;) {
 799     CompileTask* next_task = task->next();
 800     // If a method was unloaded or has been stale for some time, remove it from the queue.
 801     // Blocking tasks and tasks submitted from the whitebox API don't become stale
 802     if (task->is_unloaded()) {
 803       compile_queue->remove_and_mark_stale(task);
 804       task = next_task;
 805       continue;
 806     }
 807     if (task->is_scc()) {
 808       // SCC tasks are on a separate queue, and they should load fast. There is no need to walk
 809       // the rest of the queue, just take the task and go.
 810       return task;
 811     }
 812     if (task->is_blocking() && task->compile_reason() == CompileTask::Reason_Whitebox) {
 813       // CTW tasks, submitted as blocking Whitebox requests, do not participate in rate
 814       // selection and/or any level adjustments. Just return them in order.
 815       return task;
 816     }
 817     Method* method = task->method();
 818     methodHandle mh(THREAD, method);
 819     if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
 820       if (PrintTieredEvents) {
 821         print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
 822       }
 823       method->clear_queued_for_compilation();
 824       method->set_pending_queue_processed(false);
 825       compile_queue->remove_and_mark_stale(task);
 826       task = next_task;
 827       continue;
 828     }
 829     update_rate(t, mh);
 830     if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
 831       // Select a method with the highest rate
 832       max_task = task;
 833       max_method = method;
 834     }
 835 
 836     if (task->is_blocking()) {
 837       if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
 838         max_blocking_task = task;
 839       }
 840     }
 841 
 842     task = next_task;
 843   }
 844 
 845   if (max_blocking_task != nullptr) {
 846     // In blocking compilation mode, the CompileBroker will make
 847     // compilations submitted by a JVMCI compiler thread non-blocking. These
 848     // compilations should be scheduled after all blocking compilations
 849     // to service non-compiler related compilations sooner and reduce the
 850     // chance of such compilations timing out.
 851     max_task = max_blocking_task;
 852     max_method = max_task->method();
 853   }
 854 
 855   methodHandle max_method_h(THREAD, max_method);
 856 
 857   if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
 858       max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
 859     max_task->set_comp_level(CompLevel_limited_profile);
 860 
 861     if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
 862                                                false /* requires_online_compilation */,
 863                                                CompileTask::Reason_None)) {
 864       if (PrintTieredEvents) {
 865         print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
 866       }
 867       compile_queue->remove_and_mark_stale(max_task);
 868       max_method->clear_queued_for_compilation();
 869       return nullptr;
 870     }
 871 
 872     if (PrintTieredEvents) {
 873       print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
 874     }
 875   }
 876   return max_task;
 877 }
 878 
 879 void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
 880   for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
 881     if (PrintTieredEvents) {
 882       print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
 883     }
 884     MethodData* mdo = sd->method()->method_data();
 885     if (mdo != nullptr) {
 886       mdo->reset_start_counters();
 887     }
 888     if (sd->is_top()) break;
 889   }
 890 }
 891 
 892 nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
 893                                       int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
 894   if (PrintTieredEvents) {
 895     print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
 896   }
 897 
 898 #if INCLUDE_JVMCI
 899   if (EnableJVMCI && UseJVMCICompiler &&
 900       comp_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) {
 901     return nullptr;
 902   }
 903 #endif
 904 
 905   if (comp_level == CompLevel_none &&
 906       JvmtiExport::can_post_interpreter_events() &&
 907       THREAD->is_interp_only_mode()) {
 908     return nullptr;
 909   }
 910   if (ReplayCompiles) {
 911     // Don't trigger other compiles in testing mode
 912     return nullptr;
 913   }
 914 
 915   handle_counter_overflow(method);
 916   if (method() != inlinee()) {
 917     handle_counter_overflow(inlinee);
 918   }
 919 
 920   if (bci == InvocationEntryBci) {
 921     method_invocation_event(method, inlinee, comp_level, nm, THREAD);
 922   } else {
 923     // method == inlinee if the event originated in the main method
 924     method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
 925     // Check if event led to a higher level OSR compilation
 926     CompLevel expected_comp_level = MIN2(CompLevel_full_optimization, static_cast<CompLevel>(comp_level + 1));
 927     if (!CompilationModeFlag::disable_intermediate() && inlinee->is_not_osr_compilable(expected_comp_level)) {
 928       // It's not possible to reach the expected level so fall back to simple.
 929       expected_comp_level = CompLevel_simple;
 930     }
 931     CompLevel max_osr_level = static_cast<CompLevel>(inlinee->highest_osr_comp_level());
 932     if (max_osr_level >= expected_comp_level) { // fast check to avoid locking in a typical scenario
 933       nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
 934       assert(osr_nm == nullptr || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
 935       if (osr_nm != nullptr && osr_nm->comp_level() != comp_level) {
 936         // Perform OSR with new nmethod
 937         return osr_nm;
 938       }
 939     }
 940   }
 941   return nullptr;
 942 }
 943 
 944 // Check if the method can be compiled, change level if necessary
 945 void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
 946   assert(verify_level(level), "Invalid compilation level requested: %d", level);
 947 
 948   if (level == CompLevel_none) {
 949     if (mh->has_compiled_code()) {
 950       // Happens when we switch to interpreter to profile.
 951       MutexLocker ml(Compile_lock);
 952       NoSafepointVerifier nsv;
 953       if (mh->has_compiled_code()) {
 954         mh->code()->make_not_used();
 955       }
 956       // Deoptimize immediately (we don't have to wait for a compile).
 957       JavaThread* jt = THREAD;
 958       RegisterMap map(jt,
 959                       RegisterMap::UpdateMap::skip,
 960                       RegisterMap::ProcessFrames::include,
 961                       RegisterMap::WalkContinuation::skip);
 962       frame fr = jt->last_frame().sender(&map);
 963       Deoptimization::deoptimize_frame(jt, fr.id());
 964     }
 965     return;
 966   }
 967 
 968   if (!CompilationModeFlag::disable_intermediate()) {
 969     // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
 970     // in the interpreter and then compile with C2 (the transition function will request that,
 971     // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with
 972     // pure C1.
 973     if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
 974       if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
 975         compile(mh, bci, CompLevel_simple, THREAD);
 976       }
 977       return;
 978     }
 979     if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
 980       if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
 981         nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
 982         if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
 983           // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
 984           osr_nm->make_not_entrant("OSR invalidation for compiling with C1");
 985         }
 986         compile(mh, bci, CompLevel_simple, THREAD);
 987       }
 988       return;
 989     }
 990   }
 991   if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
 992     return;
 993   }
 994   if (!CompileBroker::compilation_is_in_queue(mh)) {
 995     if (PrintTieredEvents) {
 996       print_event(COMPILE, mh(), mh(), bci, level);
 997     }
 998     int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
 999     update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
1000     bool requires_online_compilation = false;
1001     if (TrainingData::have_data()) {
1002       MethodTrainingData* mtd = MethodTrainingData::find_fast(mh);
1003       if (mtd != nullptr) {
1004         CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
1005         if (ctd != nullptr) {
1006           requires_online_compilation = (ctd->init_deps_left() > 0);
1007         }
1008       }
1009     }
1010     CompileBroker::compile_method(mh, bci, level, mh, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
1011   }
1012 }
1013 
1014 // update_rate() is called from select_task() while holding a compile queue lock.
1015 void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
1016   // Skip update if counters are absent.
1017   // Can't allocate them since we are holding compile queue lock.
1018   if (method->method_counters() == nullptr)  return;
1019 
1020   if (is_old(method)) {
1021     // We don't remove old methods from the queue,
1022     // so we can just zero the rate.
1023     method->set_rate(0);
1024     return;
1025   }
1026 
1027   // We don't update the rate if we've just come out of a safepoint.
1028   // delta_s is the time since last safepoint in milliseconds.
1029   int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
1030   int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
1031   // How many events were there since the last time?
1032   int event_count = method->invocation_count() + method->backedge_count();
1033   int delta_e = event_count - method->prev_event_count();
1034 
1035   // We should be running for at least 1ms.
1036   if (delta_s >= TieredRateUpdateMinTime) {
1037     // And we must've taken the previous point at least 1ms before.
1038     if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
1039       method->set_prev_time(t);
1040       method->set_prev_event_count(event_count);
1041       method->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
1042     } else {
1043       if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
1044         // If nothing happened for 25ms, zero the rate. Don't modify prev values.
1045         method->set_rate(0);
1046       }
1047     }
1048   }
1049 }
1050 
1051 // Check if this method has been stale for a given number of milliseconds.
1052 // See select_task().
1053 bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
1054   int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
1055   int64_t delta_t = t - method->prev_time();
1056   if (delta_t > timeout && delta_s > timeout) {
1057     int event_count = method->invocation_count() + method->backedge_count();
1058     int delta_e = event_count - method->prev_event_count();
1059     // Return true if there were no events.
1060     return delta_e == 0;
1061   }
1062   return false;
1063 }
1064 
1065 // We don't remove old methods from the compile queue even if they have
1066 // very low activity. See select_task().
1067 bool CompilationPolicy::is_old(const methodHandle& method) {
1068   int i = method->invocation_count();
1069   int b = method->backedge_count();
1070   double k = TieredOldPercentage / 100.0;
1071 
1072   return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
1073 }
1074 
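     // Queue ordering weight: combines the current event rate with the accumulated invocation and backedge counts (each offset by 1 to avoid zero factors).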
1075 double CompilationPolicy::weight(Method* method) {
1076   return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
1077 }
1078 
1079 // Apply heuristics and return true if x should be compiled before y
1080 bool CompilationPolicy::compare_methods(Method* x, Method* y) {
1081   if (x->highest_comp_level() > y->highest_comp_level()) {
1082     // recompilation after deopt
1083     return true;
1084   } else
1085     if (x->highest_comp_level() == y->highest_comp_level()) {
1086       if (weight(x) > weight(y)) {
1087         return true;
1088       }
1089     }
1090   return false;
1091 }
1092 
1093 bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
1094   assert(!x->is_scc() && !y->is_scc(), "SC tasks are not expected here");
1095   if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
1096     return true;
1097   }
1098   return false;
1099 }
1100 
1101 // Is method profiled enough?
1102 bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
1103   MethodData* mdo = method->method_data();
1104   if (mdo != nullptr) {
1105     int i = mdo->invocation_count_delta();
1106     int b = mdo->backedge_count_delta();
1107     return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
1108   }
1109   return false;
1110 }
1111 
1112 
1113 // Determine if a method is mature.
1114 bool CompilationPolicy::is_mature(MethodData* mdo) {
1115   if (Arguments::is_compiler_only()) {
1116     // Always report profiles as immature with -Xcomp
1117     return false;
1118   }
1119   if (mdo != nullptr) {
1120     methodHandle mh(Thread::current(), mdo->method());
1121     int i = mdo->invocation_count();
1122     int b = mdo->backedge_count();
1123     double k = ProfileMaturityPercentage / 100.0;
1124     return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);
1125   }
1126   return false;
1127 }
1128 
1129 // If a method is old enough and is still in the interpreter we would want to
1130 // start profiling without waiting for the compiled method to arrive.
1131 // We also take the load on the compilers into account.
1132 bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
1133   if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
1134     return false;
1135   }
1136 
1137   if (TrainingData::have_data()) {
1138     MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
1139     if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
1140       return true;
1141     }
1142   }
1143 
1144   if (is_old(method)) {
1145     return true;
1146   }
1147 
1148   int i = method->invocation_count();
1149   int b = method->backedge_count();
1150   double k = Tier0ProfilingStartPercentage / 100.0;
1151 
1152   // If the top level compiler is not keeping up, delay profiling.
1153   if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
1154     return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
1155   }
1156   return false;
1157 }
1158 
1159 // Inlining control: if we're compiling a profiled method with C1 and the callee
1160 // is known to have OSRed in a C2 version, don't inline it.
1161 bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
1162   CompLevel comp_level = (CompLevel)env->comp_level();
1163   if (comp_level == CompLevel_full_profile ||
1164       comp_level == CompLevel_limited_profile) {
1165     return callee->highest_osr_comp_level() == CompLevel_full_optimization;
1166   }
1167   return false;
1168 }
1169 
1170 // Create MDO if necessary.
1171 void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
1172   if (mh->is_native() ||
1173       mh->is_abstract() ||
1174       mh->is_accessor() ||
1175       mh->is_constant_getter()) {
1176     return;
1177   }
1178   if (mh->method_data() == nullptr) {
1179     Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
1180   }
1181   if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
1182     MethodData* mdo = mh->method_data();
1183     if (mdo != nullptr) {
1184       frame last_frame = THREAD->last_frame();
1185       if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
1186         int bci = last_frame.interpreter_frame_bci();
1187         address dp = mdo->bci_to_dp(bci);
1188         last_frame.interpreter_frame_set_mdp(dp);
1189       }
1190     }
1191   }
1192 }
1193 
1194 CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1195   precond(mtd != nullptr);
1196   precond(cur_level == CompLevel_none);
1197 
1198   if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
1199     return CompLevel_none;
1200   }
1201 
1202   bool training_has_profile = (mtd->final_profile() != nullptr);
1203   if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
1204     return CompLevel_full_profile;
1205   }
1206 
1207   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
1208   switch (highest_training_level) {
1209     case CompLevel_limited_profile:
1210     case CompLevel_full_profile:
1211       return CompLevel_limited_profile;
1212     case CompLevel_simple:
1213       return CompLevel_simple;
1214     case CompLevel_none:
1215       return CompLevel_none;
1216     default:
1217       break;
1218   }
1219 
1220   // Now handle the case of level 4.
1221   assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
1222   if (!training_has_profile) {
1223     // The method was part of a level 4 compile, but we don't have a stored profile,
1224     // so we need to profile it.
1225     return CompLevel_full_profile;
1226   }
1227   const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
1228   // If we deopted, then we reprofile
1229   if (deopt && !is_method_profiled(method)) {
1230     return CompLevel_full_profile;
1231   }
1232 
1233   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1234   assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
1235   // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
1236   if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
1237     if (method->method_data() == nullptr) {
1238       create_mdo(method, THREAD);
1239     }
1240     return CompLevel_full_optimization;
1241   }
1242 
1243   // Otherwise go to level 2
1244   return CompLevel_limited_profile;
1245 }
1246 
1247 
1248 CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1249   precond(mtd != nullptr);
1250   precond(cur_level == CompLevel_limited_profile);
1251 
1252   // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.
1253 
1254   // But first, check if we have a saved profile
1255   bool training_has_profile = (mtd->final_profile() != nullptr);
1256   if (!training_has_profile) {
1257     return CompLevel_full_profile;
1258   }
1259 
1260 
1261   assert(training_has_profile, "Have to have a profile to be here");
1262   // Check if the method is ready
1263   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1264   if (ctd != nullptr && ctd->init_deps_left() == 0) {
1265     if (method->method_data() == nullptr) {
1266       create_mdo(method, THREAD);
1267     }
1268     return CompLevel_full_optimization;
1269   }
1270 
1271   // Otherwise stay at the current level
1272   return CompLevel_limited_profile;
1273 }
1274 
1275 
1276 CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1277   precond(mtd != nullptr);
1278   precond(cur_level == CompLevel_full_profile);
1279 
1280   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
1281   // We have a method at the full profile level and we also know that it's possibly an important method.
1282   if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
1283     // Check if it is adequately profiled
1284     if (is_method_profiled(method)) {
1285       return CompLevel_full_optimization;
1286     }
1287   }
1288 
1289   // Otherwise stay at the current level
1290   return CompLevel_full_profile;
1291 }
1292 
1293 CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1294   precond(MethodTrainingData::have_data());
1295 
1296   // If there is no training data recorded for this method, bail out.
1297   if (mtd == nullptr) {
1298     return cur_level;
1299   }
1300 
1301   CompLevel next_level = cur_level;
1302   switch(cur_level) {
1303     default: break;
1304     case CompLevel_none:
1305       next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
1306       break;
1307     case CompLevel_limited_profile:
1308       next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
1309       break;
1310     case CompLevel_full_profile:
1311       next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
1312       break;
1313   }
1314 
1315   // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
1316   if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
1317     return CompLevel_none;
1318   }
1319   if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
1320     return CompLevel_none;
1321   }
1322   return (cur_level != next_level) ? limit_level(next_level) : cur_level;
1323 }
1324 
1325 /*
1326  * Method states:
1327  *   0 - interpreter (CompLevel_none)
1328  *   1 - pure C1 (CompLevel_simple)
1329  *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
1330  *   3 - C1 with full profiling (CompLevel_full_profile)
1331  *   4 - C2 or Graal (CompLevel_full_optimization)
1332  *
1333  * Common state transition patterns:
1334  * a. 0 -> 3 -> 4.
1335  *    The most common path. But note that even in this straightforward case
1336  *    profiling can start at level 0 and finish at level 3.
1337  *
1338  * b. 0 -> 2 -> 3 -> 4.
1339  *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
1340  *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
1341  *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
1342  *
1343  * c. 0 -> (3->2) -> 4.
1344  *    Here we enqueue a method for compilation at level 3, but the C1 queue is long enough
1345  *    for the profiling to fully occur at level 0. We therefore change the compilation level
1346  *    of the method to 2 while the request is still in the queue, because that allows it to run
1347  *    much faster without full profiling while C2 is compiling.
1348  *
1349  * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
1350  *    Once a method has been compiled with C1 it can be identified as trivial and compiled to
1351  *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
1352  *
1353  * e. 0 -> 4.
1354  *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
1355  *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
1356  *    the compiled version already exists).
1357  *
1358  * Note that since state 0 can be reached from any other state via deoptimization, different loops
1359  * are possible.
1360  *
1361  */
1362 
1363 // Common transition function. Given a predicate, determines whether a method should transition to another level.
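     // The Predicate type parameter is assumed (inferred from the call sites in this file) to provide:
     //   static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b);
     //   static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale);
     // where i and b are the invocation and backedge counts, and scale (>= 1.0) makes the transition
     // less eager, e.g. to delay profiling. CallPredicate and LoopPredicate are the two instantiations
     // used by call_event() and loop_event() below.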
1364 template<typename Predicate>
1365 CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
1366   CompLevel next_level = cur_level;
1367 
1368   if (force_comp_at_level_simple(method)) {
1369     next_level = CompLevel_simple;
1370   } else if (is_trivial(method) || method->is_native()) {
1371     // We do not care whether there is profiling data for these methods; just hand them to the compiler.
1372     next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
1373   } else if (MethodTrainingData::have_data()) {
1374     MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
1375     if (mtd == nullptr) {
1376       // We haven't seen any compilations of this method during training. It's either very cold or its behavior has changed.
1377       // Feed it to the standard transition function (TF) with no profiling delay.
1378       next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
1379     } else {
1380       next_level = trained_transition(method, cur_level, mtd, THREAD);
1381       if (cur_level == next_level) {
1382         // trained_transition() returns the same level if no startup/warmup optimizations apply.
1383         // In order to catch possible pathologies due to a behavior change we feed the event to the regular
1384         // TF, but with a profiling delay.
1385         next_level = standard_transition<Predicate>(method, cur_level, true /*delay_profiling*/, disable_feedback);
1386       }
1387     }
1388   } else {
1389     next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
1390   }
1391   return (next_level != cur_level) ? limit_level(next_level) : next_level;
1392 }
1393 
1394 
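     // Standard transition (no training data, or training data gave no answer): dispatch on the
     // current level to one of the per-level transition functions below.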
1395 template<typename Predicate>
1396 CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1397   CompLevel next_level = cur_level;
1398   switch(cur_level) {
1399   default: break;
1400   case CompLevel_none:
1401     next_level = transition_from_none<Predicate>(method, cur_level, delay_profiling, disable_feedback);
1402     break;
1403   case CompLevel_limited_profile:
1404     next_level = transition_from_limited_profile<Predicate>(method, cur_level, delay_profiling, disable_feedback);
1405     break;
1406   case CompLevel_full_profile:
1407     next_level = transition_from_full_profile<Predicate>(method, cur_level);
1408     break;
1409   }
1410   return next_level;
1411 }
1412 
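     // Decide where to go from the interpreter (level 0): straight to full optimization (4) if a
     // full profile would already qualify, otherwise to full profiling (3), or to limited
     // profiling (2) when profiling is being delayed or the C2 queue is backed up.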
1413 template<typename Predicate>
1414 CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1415   precond(cur_level == CompLevel_none);
1416   CompLevel next_level = cur_level;
1417   int i = method->invocation_count();
1418   int b = method->backedge_count();
1419   double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0;
1420   // If we were at full profile level, would we switch to full opt?
1421   if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
1422     next_level = CompLevel_full_optimization;
1423   } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) {
1424     // C1-generated fully profiled code is about 30% slower than the limited-profile
1425     // code that has only invocation and backedge counters. The observation is that
1426     // if the C2 queue is large enough we can spend too much time in the fully profiled code
1427     // while waiting for C2 to pick the method from the queue. To alleviate this problem
1428     // we introduce feedback based on the C2 queue size. If the C2 queue is sufficiently long
1429     // we choose to compile a limited-profile version first and then recompile with full profiling
1430     // when the load on C2 goes down.
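         // For example, with Tier3DelayOn == 5 and compiler_count(CompLevel_full_optimization) == 2
         // (illustrative values only), the limited-profile detour kicks in once more than 10
         // requests are pending in the C2 queue.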
1431     if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) {
1432       next_level = CompLevel_limited_profile;
1433     } else {
1434       next_level = CompLevel_full_profile;
1435     }
1436   }
1437   return next_level;
1438 }
1439 
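     // Decide whether a fully profiled method (level 3) should be promoted to full optimization (4):
     // either its MDO counter deltas pass the predicate, or the MDO reports that no further
     // profiling would happen.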
1440 template<typename Predicate>
1441 CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
1442   precond(cur_level == CompLevel_full_profile);
1443   CompLevel next_level = cur_level;
1444   MethodData* mdo = method->method_data();
1445   if (mdo != nullptr) {
1446     if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
1447       int mdo_i = mdo->invocation_count_delta();
1448       int mdo_b = mdo->backedge_count_delta();
1449       if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
1450         next_level = CompLevel_full_optimization;
1451       }
1452     } else {
1453       next_level = CompLevel_full_optimization;
1454     }
1455   }
1456   return next_level;
1457 }
1458 
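     // Decide whether a method at the limited-profile level (2) should resume full profiling at
     // level 3 (once the C2 queue is short enough, or when feedback is disabled), or go straight to
     // full optimization (4) if no further profiling is needed or the method is already adequately profiled.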
1459 template<typename Predicate>
1460 CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1461   precond(cur_level == CompLevel_limited_profile);
1462   CompLevel next_level = cur_level;
1463   int i = method->invocation_count();
1464   int b = method->backedge_count();
1465   double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0;
1466   MethodData* mdo = method->method_data();
1467   if (mdo != nullptr) {
1468     if (mdo->would_profile()) {
1469       if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1470                               Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1471                               Predicate::apply_scaled(method, cur_level, i, b, scale))) {
1472         next_level = CompLevel_full_profile;
1473       }
1474     } else {
1475       next_level = CompLevel_full_optimization;
1476     }
1477   } else {
1478     // If there is no MDO we need to profile
1479     if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1480                             Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1481                             Predicate::apply_scaled(method, cur_level, i, b, scale))) {
1482       next_level = CompLevel_full_profile;
1483     }
1484   }
1485   if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
1486     next_level = CompLevel_full_optimization;
1487   }
1488   return next_level;
1489 }
1490 
1491 
1492 // Determine if a method should be compiled with a normal entry point at a different level.
1493 CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
1494   CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
1495   CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));
1496 
1497   // If the OSR method level is greater than the regular method level, the levels should be
1498   // equalized by raising the regular method level in order to avoid OSRs during each
1499   // invocation of the method.
1500   if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
1501     MethodData* mdo = method->method_data();
1502     guarantee(mdo != nullptr, "MDO should not be nullptr");
1503     if (mdo->invocation_count() >= 1) {
1504       next_level = CompLevel_full_optimization;
1505     }
1506   } else {
1507     next_level = MAX2(osr_level, next_level);
1508   }
1509 #if INCLUDE_JVMCI
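       // With the JVMCI compiler, hold back level 4 (full optimization) requests until AOT class
       // preloading has finished (CDS builds only) and keep the method at its current level.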
1510   if (EnableJVMCI && UseJVMCICompiler &&
1511       next_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) {
1512     next_level = cur_level;
1513   }
1514 #endif
1515   return next_level;
1516 }
1517 
1518 // Determine if we should do an OSR compilation of a given method.
1519 CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
1520   CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
1521   if (cur_level == CompLevel_none) {
1522     // If there is a live OSR method, it means that we deopted to the interpreter
1523     // for the transition.
1524     CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
1525     if (osr_level > CompLevel_none) {
1526       return osr_level;
1527     }
1528   }
1529   return next_level;
1530 }
1531 
1532 // Handle the invocation event.
1533 void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
1534                                                       CompLevel level, nmethod* nm, TRAPS) {
1535   if (should_create_mdo(mh, level)) {
1536     create_mdo(mh, THREAD);
1537   }
1538   CompLevel next_level = call_event(mh, level, THREAD);
1539   if (next_level != level) {
1540     if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
1541       compile(mh, InvocationEntryBci, next_level, THREAD);
1542     }
1543   }
1544 }
1545 
1546 // Handle the back branch event. Notice that we can compile the method
1547 // with a regular entry from here.
1548 void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
1549                                                      int bci, CompLevel level, nmethod* nm, TRAPS) {
1550   if (should_create_mdo(mh, level)) {
1551     create_mdo(mh, THREAD);
1552   }
1553   // Check if MDO should be created for the inlined method
1554   if (should_create_mdo(imh, level)) {
1555     create_mdo(imh, THREAD);
1556   }
1557 
1558   if (is_compilation_enabled()) {
1559     CompLevel next_osr_level = loop_event(imh, level, THREAD);
1560     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
1561     // At the very least compile the OSR version
1562     if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
1563       compile(imh, bci, next_osr_level, CHECK);
1564     }
1565 
1566     // Use loop event as an opportunity to also check if there's been
1567     // enough calls.
1568     CompLevel cur_level, next_level;
1569     if (mh() != imh()) { // If there is an enclosing method
1570       {
1571         guarantee(nm != nullptr, "Should have nmethod here");
1572         cur_level = comp_level(mh());
1573         next_level = call_event(mh, cur_level, THREAD);
1574 
1575         if (max_osr_level == CompLevel_full_optimization) {
1576           // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts
1577           bool make_not_entrant = false;
1578           if (nm->is_osr_method()) {
1579             // This is an OSR method; just make it not entrant and recompile later if needed
1580             make_not_entrant = true;
1581           } else {
1582             if (next_level != CompLevel_full_optimization) {
1583               // next_level is not full opt, so we need to recompile the
1584               // enclosing method without the inlinee
1585               cur_level = CompLevel_none;
1586               make_not_entrant = true;
1587             }
1588           }
1589           if (make_not_entrant) {
1590             if (PrintTieredEvents) {
1591               int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
1592               print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
1593             }
1594             nm->make_not_entrant("OSR invalidation, back branch");
1595           }
1596         }
1597         // Fix up next_level if necessary to avoid deopts
1598         if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
1599           next_level = CompLevel_full_profile;
1600         }
1601         if (cur_level != next_level) {
1602           if (!CompileBroker::compilation_is_in_queue(mh)) {
1603             compile(mh, InvocationEntryBci, next_level, THREAD);
1604           }
1605         }
1606       }
1607     } else {
1608       cur_level = comp_level(mh());
1609       next_level = call_event(mh, cur_level, THREAD);
1610       if (next_level != cur_level) {
1611         if (!CompileBroker::compilation_is_in_queue(mh)) {
1612           compile(mh, InvocationEntryBci, next_level, THREAD);
1613         }
1614       }
1615     }
1616   }
1617 }
1618