/*
 * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/aotLinkedClassBulkLoader.hpp"
#include "code/scopeDesc.hpp"
#include "code/SCCache.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/recompilationSchedule.hpp"
#include "oops/trainingData.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

int64_t CompilationPolicy::_start_time = 0;
int CompilationPolicy::_c1_count = 0;
int CompilationPolicy::_c2_count = 0;
int CompilationPolicy::_c3_count = 0;
int CompilationPolicy::_sc_count = 0;
double CompilationPolicy::_increase_threshold_at_ratio = 0;

CompilationPolicy::LoadAverage CompilationPolicy::_load_average;
CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;
volatile bool CompilationPolicy::_recompilation_done = false;

void compilationPolicy_init() {
  CompilationPolicy::initialize();
}

int CompilationPolicy::compiler_count(CompLevel comp_level) {
  if (is_c1_compile(comp_level)) {
    return c1_count();
  } else if (is_c2_compile(comp_level)) {
    return c2_count();
  }
  return 0;
}

void CompilationPolicy::sample_load_average() {
  const int c2_queue_size = CompileBroker::queue_size(CompLevel_full_optimization);
  _load_average.sample(c2_queue_size);
}

bool CompilationPolicy::have_recompilation_work() {
  if (UseRecompilation && TrainingData::have_data() && RecompilationSchedule::have_schedule() &&
                          RecompilationSchedule::length() > 0 && !_recompilation_done) {
    if (_load_average.value() <= RecompilationLoadAverageThreshold) {
      return true;
    }
  }
  return false;
}

bool CompilationPolicy::recompilation_step(int step, TRAPS) {
  if (!have_recompilation_work() || os::elapsedTime() < DelayRecompilation) {
    return false;
  }

  const int size = RecompilationSchedule::length();
  int i = 0;
  int count = 0;
  bool repeat = false;
  for (; i < size && count < step; i++) {
    if (!RecompilationSchedule::status_at(i)) {
      MethodTrainingData* mtd = RecompilationSchedule::at(i);
      if (!mtd->has_holder()) {
        RecompilationSchedule::set_status_at(i, true);
        continue;
      }
      const Method* method = mtd->holder();
      InstanceKlass* klass = method->method_holder();
      if (klass->is_not_initialized()) {
        repeat = true;
        continue;
      }
      nmethod* nm = method->code();
      if (nm == nullptr) {
        repeat = true;
        continue;
      }

      if (!ForceRecompilation && !(nm->is_scc() && nm->comp_level() == CompLevel_full_optimization)) {
        // If it's already online-compiled at level 4, mark it as done.
        if (nm->comp_level() == CompLevel_full_optimization) {
          RecompilationSchedule::set_status_at(i, true);
        } else {
          repeat = true;
        }
        continue;
      }
      if (RecompilationSchedule::claim_at(i)) {
        const methodHandle m(THREAD, const_cast<Method*>(method));
        CompLevel next_level = CompLevel_full_optimization;

        if (method->method_data() == nullptr) {
          create_mdo(m, THREAD);
        }

        if (PrintTieredEvents) {
          print_event(FORCE_RECOMPILE, m(), m(), InvocationEntryBci, next_level);
        }
        CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0,
                                      true /*requires_online_compilation*/, CompileTask::Reason_MustBeCompiled, THREAD);
        if (HAS_PENDING_EXCEPTION) {
          CLEAR_PENDING_EXCEPTION;
        }
        count++;
      }
    }
  }

  if (i == size && !repeat) {
    Atomic::release_store(&_recompilation_done, true);
  }
  return count > 0;
}

// Returns true if m must be compiled before executing it
// This is intended to force compiles for methods (usually for
// debugging) that would otherwise be interpreted for some reason.
bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  // Don't allow Xcomp to cause compiles in replay mode
  if (ReplayCompiles) return false;

  if (m->has_compiled_code()) return false;       // already compiled
  if (!can_be_compiled(m, comp_level)) return false;

  return !UseInterpreter ||                                                                        // must compile all methods
         (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}
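
// Illustrative note: -Xcomp turns the interpreter off (UseInterpreter becomes
// false), so under -Xcomp every method that passes can_be_compiled() must be
// compiled before its first execution. Independently, AlwaysCompileLoopMethods
// eagerly forces methods containing loops to compile while the broker is still
// accepting new jobs.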

void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized')
    // Do not force compilation of methods in uninitialized classes.
    return;
  }
  if (!m->is_native() && MethodTrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find(m);
    if (mtd == nullptr) {
      return;              // there is no training data recorded for m
    }
    bool recompile = m->code_has_clinit_barriers();
    CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
    CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
    if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
      bool requires_online_compilation = false;
      CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
      if (ctd != nullptr) {
        requires_online_compilation = (ctd->init_deps_left() > 0);
      }
      if (requires_online_compilation && recompile) {
        return;
      }
      if (PrintTieredEvents) {
        print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
      }
      CompileBroker::compile_method(m, InvocationEntryBci, next_level, methodHandle(), 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
      }
    }
  }
}

void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
  assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
  maybe_compile_early(m, THREAD);
}

void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
  if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
    // don't force compilation, resolve was on behalf of compiler
    return;
  }
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized')
    // Do not force compilation of methods in uninitialized classes.
    // Note that forcing compilation here would trip an assert later,
    // in CompileBroker::compile_method.
    // We sometimes use the link resolver to do reflective lookups
    // even before classes are initialized.
    return;
  }

  if (must_be_compiled(m)) {
    // This path is unusual, mostly used by the '-Xcomp' stress test mode.
    CompLevel level = initial_compile_level(m);
    if (PrintTieredEvents) {
      print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
    }
    CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
  }
}

void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
  if (!klass->has_init_deps_processed()) {
    ResourceMark rm;
    log_debug(training)("Replay training: %s", klass->external_name());

    KlassTrainingData* ktd = KlassTrainingData::find(klass);
    if (ktd != nullptr) {
      guarantee(ktd->has_holder(), "");
      ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
      assert(klass->has_init_deps_processed(), "");

      ktd->iterate_all_comp_deps([&](CompileTrainingData* ctd) {
        if (ctd->init_deps_left() == 0) {
          MethodTrainingData* mtd = ctd->method();
          if (mtd->has_holder()) {
            const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
            CompilationPolicy::maybe_compile_early(mh, THREAD);
          }
        }
      });
    }
    Array<Method*>* methods = klass->methods();
    for (int i = 0; i < methods->length(); i++) {
      const methodHandle mh(THREAD, methods->at(i));
      CompilationPolicy::maybe_compile_early_after_init(mh, THREAD);
    }
  }
}

void CompilationPolicy::replay_training_at_init(bool is_on_shutdown, TRAPS) {
  // Drain pending queue when no concurrent processing thread is present.
  if (UseConcurrentTrainingReplay) {
    if (VerifyTrainingData) {
      MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
      while (!_training_replay_queue.is_empty_unlocked()) {
        locker.wait(); // let the replay training thread drain the queue
      }
    }
  } else {
    do {
      InstanceKlass* pending = _training_replay_queue.try_pop(TrainingReplayQueue_lock, THREAD);
      if (pending == nullptr) {
        break; // drained the queue
      }
      if (is_on_shutdown) {
        LogStreamHandle(Warning, training) log;
        if (log.is_enabled()) {
          ResourceMark rm;
          log.print("pending training replay request: %s%s",
                    pending->external_name(), (pending->has_preinitialized_mirror() ? " (preinitialized)" : ""));
        }
      }
      replay_training_at_init_impl(pending, THREAD);
    } while (true);
  }

  if (VerifyTrainingData) {
    TrainingData::verify();
  }
}

void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
  assert(klass->is_initialized(), "");
  if (TrainingData::have_data() && klass->is_shared() &&
      (CompileBroker::replay_initialized() || !klass->has_preinitialized_mirror())) { // ignore preloaded classes during early startup
    if (UseConcurrentTrainingReplay || !CompileBroker::replay_initialized()) {
      _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
    } else {
      replay_training_at_init_impl(klass, THREAD);
    }
    assert(!HAS_PENDING_EXCEPTION, "");
  }
}

// For TrainingReplayQueue
template<>
void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
  int pos = 0;
  for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
    ResourceMark rm;
    InstanceKlass* ik = cur->value();
    st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
  }
}

void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
  precond(UseConcurrentTrainingReplay);

  while (!CompileBroker::is_compilation_disabled_forever() || VerifyTrainingData) {
    InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
    replay_training_at_init_impl(ik, THREAD);
  }
}

static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
  if (comp_level == CompLevel_any) {
    if (CompilerConfig::is_c1_only()) {
      comp_level = CompLevel_simple;
    } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      comp_level = CompLevel_full_optimization;
    }
  }
  return comp_level;
}
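
// Illustrative examples: in a C1-only configuration (e.g. -XX:TieredStopAtLevel=1)
// a CompLevel_any query is answered for CompLevel_simple, and in a C2/JVMCI-only
// configuration (e.g. -XX:-TieredCompilation) for CompLevel_full_optimization;
// otherwise CompLevel_any is passed through and the caller checks compilability
// across all levels.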

// Returns true if m is allowed to be compiled
bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
  // allow any levels for WhiteBox
  assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);

  if (m->is_abstract()) return false;
  if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;

  // Math intrinsics should never be compiled, as this can lead to
  // monotonicity problems because the interpreter will prefer the
  // compiled code to the intrinsic version. This can't happen in
  // production because the invocation counter can't be incremented,
  // but we shouldn't expose the system to this problem in testing
  // modes.
  if (!AbstractInterpreter::can_be_compiled(m)) {
    return false;
  }
  comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
  if (comp_level == CompLevel_any || is_compile(comp_level)) {
    return !m->is_not_compilable(comp_level);
  }
  return false;
}

// Returns true if m is allowed to be osr compiled
bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
  bool result = false;
  comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
  if (comp_level == CompLevel_any || is_compile(comp_level)) {
    result = !m->is_not_osr_compilable(comp_level);
  }
  return (result && can_be_compiled(m, comp_level));
}

bool CompilationPolicy::is_compilation_enabled() {
  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  return CompileBroker::should_compile_new_jobs();
}

CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
  // Remove unloaded methods from the queue
  for (CompileTask* task = compile_queue->first(); task != nullptr; ) {
    CompileTask* next = task->next();
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
    }
    task = next;
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler && !BackgroundCompilation) {
    /*
     * In blocking compilation mode, the CompileBroker will make
     * compilations submitted by a JVMCI compiler thread non-blocking. These
     * compilations should be scheduled after all blocking compilations
     * to service non-compiler related compilations sooner and reduce the
     * chance of such compilations timing out.
     */
    for (CompileTask* task = compile_queue->first(); task != nullptr; task = task->next()) {
      if (task->is_blocking()) {
        return task;
      }
    }
  }
#endif
  return compile_queue->first();
}

// Simple methods are compiled just as well by C1 as by C2.
// Determine whether a given method is such a case.
bool CompilationPolicy::is_trivial(const methodHandle& method) {
  if (method->is_accessor() ||
      method->is_constant_getter()) {
    return true;
  }
  return false;
}

bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
  if (CompilationModeFlag::quick_internal()) {
#if INCLUDE_JVMCI
    if (UseJVMCICompiler) {
      AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
      if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
        return !SCCache::is_C3_on();
      }
    }
#endif
  }
  return false;
}

CompLevel CompilationPolicy::comp_level(Method* method) {
  nmethod* nm = method->code();
  if (nm != nullptr && nm->is_in_use()) {
    return (CompLevel)nm->comp_level();
  }
  return CompLevel_none;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
class LoopPredicate : AllStatic {
public:
  static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
    double threshold_scaling;
    if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
      scale *= threshold_scaling;
    }
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile:
      return b >= Tier3BackEdgeThreshold * scale;
    case CompLevel_full_profile:
      return b >= Tier4BackEdgeThreshold * scale;
    default:
      return true;
    }
  }

  static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
    double k = 1;
    switch(cur_level) {
    case CompLevel_none:
    // Fall through
    case CompLevel_limited_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
      break;
    }
    case CompLevel_full_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
    default:
      return true;
    }
    return apply_scaled(method, cur_level, i, b, k);
  }
};
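
// Worked example for LoopPredicate (assuming the usual defaults
// Tier3BackEdgeThreshold=60000 and Tier4BackEdgeThreshold=40000; actual values
// depend on build and flags): with a computed scale k=2, an interpreted or
// level-2 method becomes eligible for a level-3 OSR compile once its backedge
// count b reaches 120000, and a level-3 method becomes eligible for level 4
// once b reaches 80000.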

class CallPredicate : AllStatic {
public:
  static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
    double threshold_scaling;
    if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
      scale *= threshold_scaling;
    }
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile:
      return (i >= Tier3InvocationThreshold * scale) ||
             (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
    case CompLevel_full_profile:
      return (i >= Tier4InvocationThreshold * scale) ||
             (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
    default:
      return true;
    }
  }

  static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
    double k = 1;
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
      break;
    }
    case CompLevel_full_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
    default:
      return true;
    }
    return apply_scaled(method, cur_level, i, b, k);
  }
};
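
// Worked example for CallPredicate (assuming the usual defaults
// Tier3InvocationThreshold=200, Tier3MinInvocationThreshold=100,
// Tier3CompileThreshold=2000): with scale k=1, an interpreted method triggers
// a level-3 compile either after 200 invocations, or after 100 invocations
// provided invocations plus backedges (i + b) have reached 2000.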

double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
  int comp_count = compiler_count(level);
  if (comp_count > 0) {
    double queue_size = CompileBroker::queue_size(level);
    double k = (double)queue_size / ((double)feedback_k * (double)comp_count) + 1;

    // Increase C1 compile threshold when the code cache is filled more
    // than specified by IncreaseFirstTierCompileThresholdAt percentage.
    // The main intention is to keep enough free space for C2 compiled code
    // to achieve peak performance if the code cache is under stress.
    if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level)) {
      double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
      if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
        k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
      }
    }
    return k;
  }
  return 1;
}
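
// Worked example: with 2 C2 compiler threads, a C2 queue of 30 tasks and
// Tier4LoadFeedback=3 (the usual default), k = 30 / (3 * 2) + 1 = 6, i.e. the
// Tier4 thresholds are effectively multiplied by 6 while the queue stays that
// long. The exp() term above additionally inflates C1 thresholds as the code
// cache fills past IncreaseFirstTierCompileThresholdAt percent.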

void CompilationPolicy::print_counters(const char* prefix, Method* m) {
  int invocation_count = m->invocation_count();
  int backedge_count = m->backedge_count();
  MethodData* mdh = m->method_data();
  int mdo_invocations = 0, mdo_backedges = 0;
  int mdo_invocations_start = 0, mdo_backedges_start = 0;
  if (mdh != nullptr) {
    mdo_invocations = mdh->invocation_count();
    mdo_backedges = mdh->backedge_count();
    mdo_invocations_start = mdh->invocation_count_start();
    mdo_backedges_start = mdh->backedge_count_start();
  }
  tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
      invocation_count, backedge_count, prefix,
      mdo_invocations, mdo_invocations_start,
      mdo_backedges, mdo_backedges_start);
  tty->print(" %smax levels=%d,%d", prefix,
      m->highest_comp_level(), m->highest_osr_comp_level());
}

void CompilationPolicy::print_training_data(const char* prefix, Method* method) {
  methodHandle m(Thread::current(), method);
  tty->print(" %smtd: ", prefix);
  MethodTrainingData* mtd = MethodTrainingData::find(m);
  if (mtd == nullptr) {
    tty->print("null");
  } else {
    MethodData* md = mtd->final_profile();
    tty->print("mdo=");
    if (md == nullptr) {
      tty->print("null");
    } else {
      int mdo_invocations = md->invocation_count();
      int mdo_backedges = md->backedge_count();
      int mdo_invocations_start = md->invocation_count_start();
      int mdo_backedges_start = md->backedge_count_start();
      tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
    }
    CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
    tty->print(", deps=");
    if (ctd == nullptr) {
      tty->print("null");
    } else {
      tty->print("%d", ctd->init_deps_left());
    }
  }
}

// Print an event.
void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
  bool inlinee_event = m != im;

  ttyLocker tty_lock;
  tty->print("%lf: [", os::elapsedTime());

  switch(type) {
  case CALL:
    tty->print("call");
    break;
  case LOOP:
    tty->print("loop");
    break;
  case COMPILE:
    tty->print("compile");
    break;
  case FORCE_COMPILE:
    tty->print("force-compile");
    break;
  case FORCE_RECOMPILE:
    tty->print("force-recompile");
    break;
  case REMOVE_FROM_QUEUE:
    tty->print("remove-from-queue");
    break;
  case UPDATE_IN_QUEUE:
    tty->print("update-in-queue");
    break;
  case REPROFILE:
    tty->print("reprofile");
    break;
  case MAKE_NOT_ENTRANT:
    tty->print("make-not-entrant");
    break;
  default:
    tty->print("unknown");
  }

  tty->print(" level=%d ", level);

  ResourceMark rm;
  char* method_name = m->name_and_sig_as_C_string();
  tty->print("[%s", method_name);
  if (inlinee_event) {
    char* inlinee_name = im->name_and_sig_as_C_string();
    tty->print(" [%s]] ", inlinee_name);
  } else {
    tty->print("] ");
  }
  tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
                                      CompileBroker::queue_size(CompLevel_full_optimization));

  tty->print(" rate=");
  if (m->prev_time() == 0) {
    tty->print("n/a");
  } else {
    tty->print("%f", m->rate());
  }
  tty->print(" load=%lf", _load_average.value());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

  if (type != COMPILE) {
    print_counters("", m);
    if (inlinee_event) {
      print_counters("inlinee ", im);
    }
    tty->print(" compilable=");
    bool need_comma = false;
    if (!m->is_not_compilable(CompLevel_full_profile)) {
      tty->print("c1");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
      if (need_comma) tty->print(",");
      tty->print("c1-osr");
      need_comma = true;
    }
    if (!m->is_not_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2-osr");
    }
    tty->print(" status=");
    if (m->queued_for_compilation()) {
      tty->print("in-queue");
    } else {
      tty->print("idle");
    }
    print_training_data("", m);
    if (inlinee_event) {
      print_training_data("inlinee ", im);
    }
  }
  tty->print_cr("]");
}

void CompilationPolicy::initialize() {
  if (!CompilerConfig::is_interpreter_only()) {
    int count = CICompilerCount;
    bool c1_only = CompilerConfig::is_c1_only();
    bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();

#ifdef _LP64
    // Turn on ergonomic compiler count selection
    if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
      FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
    }
    if (CICompilerCountPerCPU) {
      // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
      int log_cpu = log2i(os::active_processor_count());
      int loglog_cpu = log2i(MAX2(log_cpu, 1));
      count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
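      // Worked example: with 16 active processors, log_cpu = 4 and
      // loglog_cpu = 2, so count = MAX2(4 * 2 * 3 / 2, 2) = 12 compiler
      // threads, before the code-cache fit check below.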
      // Make sure there is enough space in the code cache to hold all the compiler buffers
      size_t c1_size = 0;
#ifdef COMPILER1
      c1_size = Compiler::code_buffer_size();
#endif
      size_t c2_size = 0;
#ifdef COMPILER2
      c2_size = C2Compiler::initial_code_buffer_size();
#endif
      size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
      int max_count = (ReservedCodeCacheSize - (int)CompilerConfig::min_code_cache_size()) / (int)buffer_size;
      if (count > max_count) {
        // Lower the compiler count such that all buffers fit into the code cache
        count = MAX2(max_count, c1_only ? 1 : 2);
      }
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#else
    // On 32-bit systems, the number of compiler threads is limited to 3.
    // On these systems, the virtual address space available to the JVM
    // is usually limited to 2-4 GB (the exact value depends on the platform).
    // As the compilers (especially C2) can consume a large amount of
    // memory, scaling the number of compiler threads with the number of
    // available cores can result in the exhaustion of the address space
    // available to the VM and thus cause the VM to crash.
    if (FLAG_IS_DEFAULT(CICompilerCount)) {
      count = 3;
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#endif

    if (c1_only) {
      // No C2 compiler thread required
      set_c1_count(count);
    } else if (c2_only) {
      set_c2_count(count);
    } else {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && UseJVMCINativeLibrary) {
        int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
        int c1_count = MAX2(count - libjvmci_count, 1);
        set_c2_count(libjvmci_count);
        set_c1_count(c1_count);
      } else if (SCCache::is_C3_on()) {
        set_c1_count(MAX2(count / 3, 1));
        set_c2_count(MAX2(count - c1_count(), 1));
        set_c3_count(1);
      } else
#endif
      {
        set_c1_count(MAX2(count / 3, 1));
        set_c2_count(MAX2(count - c1_count(), 1));
      }
    }
    if (SCCache::is_code_load_thread_on()) {
      set_sc_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
    }
    assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
    set_increase_threshold_at_ratio();
  }

  set_start_time(nanos_to_millis(os::javaTimeNanos()));
}

#ifdef ASSERT
bool CompilationPolicy::verify_level(CompLevel level) {
  if (TieredCompilation && level > TieredStopAtLevel) {
    return false;
  }
  // Check if there is a compiler to process the requested level
  if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
    return false;
  }
  if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
    return false;
  }

  // Interpreter level is always valid.
  if (level == CompLevel_none) {
    return true;
  }
  if (CompilationModeFlag::normal()) {
    return true;
  } else if (CompilationModeFlag::quick_only()) {
    return level == CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    return level == CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    return level == CompLevel_full_optimization || level == CompLevel_simple;
  }
  return false;
}
#endif

CompLevel CompilationPolicy::highest_compile_level() {
  CompLevel level = CompLevel_none;
  // Setup the maximum level available for the current compiler configuration.
  if (!CompilerConfig::is_interpreter_only()) {
    if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
      level = CompLevel_full_optimization;
    } else if (CompilerConfig::is_c1_enabled()) {
      if (CompilerConfig::is_c1_simple_only()) {
        level = CompLevel_simple;
      } else {
        level = CompLevel_full_profile;
      }
    }
  }
  // Clamp the maximum level with TieredStopAtLevel.
  if (TieredCompilation) {
    level = MIN2(level, (CompLevel) TieredStopAtLevel);
  }

  // Fix it up if after the clamping it has become invalid.
  // Bring it monotonically down depending on the next available level for
  // the compilation mode.
  if (!CompilationModeFlag::normal()) {
    // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
    // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
    // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
    if (CompilationModeFlag::quick_only()) {
      if (level == CompLevel_limited_profile || level == CompLevel_full_profile || level == CompLevel_full_optimization) {
        level = CompLevel_simple;
      }
    } else if (CompilationModeFlag::high_only()) {
      if (level == CompLevel_simple || level == CompLevel_limited_profile || level == CompLevel_full_profile) {
        level = CompLevel_none;
      }
    } else if (CompilationModeFlag::high_only_quick_internal()) {
      if (level == CompLevel_limited_profile || level == CompLevel_full_profile) {
        level = CompLevel_simple;
      }
    }
  }

  assert(verify_level(level), "Invalid highest compilation level: %d", level);
  return level;
}

CompLevel CompilationPolicy::limit_level(CompLevel level) {
  level = MIN2(level, highest_compile_level());
  assert(verify_level(level), "Invalid compilation level: %d", level);
  return level;
}

CompLevel CompilationPolicy::initial_compile_level(const methodHandle& method) {
  CompLevel level = CompLevel_any;
  if (CompilationModeFlag::normal()) {
    level = CompLevel_full_profile;
  } else if (CompilationModeFlag::quick_only()) {
    level = CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    level = CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    if (force_comp_at_level_simple(method)) {
      level = CompLevel_simple;
    } else {
      level = CompLevel_full_optimization;
    }
  }
  assert(level != CompLevel_any, "Unhandled compilation mode");
  return limit_level(level);
}
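
// For example, under the default (normal) compilation mode a forced first
// compile requested via must_be_compiled()/-Xcomp starts at level 3
// (CompLevel_full_profile), unless limit_level() clamps it further, e.g. to
// level 1 when only the simple C1 tier is available.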

// Set carry flags on the counters if necessary
void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != nullptr) {
    mcs->invocation_counter()->set_carry_on_overflow();
    mcs->backedge_counter()->set_carry_on_overflow();
  }
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    mdo->invocation_counter()->set_carry_on_overflow();
    mdo->backedge_counter()->set_carry_on_overflow();
  }
}
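
// Note (behavioral sketch): the carry bit marks a counter that has reached its
// limit, making it "sticky" at a high value instead of wrapping back toward
// zero, so a hot method cannot appear cold again after counter overflow.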

// Called with the queue locked and with at least one element
CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
  CompileTask* max_blocking_task = nullptr;
  CompileTask* max_task = nullptr;
  Method* max_method = nullptr;

  int64_t t = nanos_to_millis(os::javaTimeNanos());
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != nullptr;) {
    CompileTask* next_task = task->next();
    // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from the whitebox API don't become stale.
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    if (task->is_scc()) {
      // SCC tasks are on a separate queue, and they should load fast. There is no need to walk
      // the rest of the queue, just take the task and go.
      return task;
    }
    Method* method = task->method();
    methodHandle mh(THREAD, method);
    if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
      }
      method->clear_queued_for_compilation();
      method->set_pending_queue_processed(false);
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    update_rate(t, mh);
    if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
      // Select a method with the highest rate
      max_task = task;
      max_method = method;
    }

    if (task->is_blocking()) {
      if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != nullptr) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  methodHandle max_method_h(THREAD, max_method);

  if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
      max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
    max_task->set_comp_level(CompLevel_limited_profile);

    if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
                                               false /* requires_online_compilation */,
                                               CompileTask::Reason_None)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
      }
      compile_queue->remove_and_mark_stale(max_task);
      max_method->clear_queued_for_compilation();
      return nullptr;
    }

    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }
  return max_task;
}

void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != nullptr) {
      mdo->reset_start_counters();
    }
    if (sd->is_top()) break;
  }
}

nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
  }

#if INCLUDE_JVMCI
  if (EnableJVMCI && UseJVMCICompiler &&
      comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
    return nullptr;
  }
#endif

  if (comp_level == CompLevel_none &&
      JvmtiExport::can_post_interpreter_events() &&
      THREAD->is_interp_only_mode()) {
    return nullptr;
  }
  if (ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    return nullptr;
  }

  handle_counter_overflow(method);
  if (method() != inlinee()) {
    handle_counter_overflow(inlinee);
  }

  if (bci == InvocationEntryBci) {
    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
  } else {
    // method == inlinee if the event originated in the main method
    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
    // Check if event led to a higher level OSR compilation
    CompLevel expected_comp_level = MIN2(CompLevel_full_optimization, static_cast<CompLevel>(comp_level + 1));
    if (!CompilationModeFlag::disable_intermediate() && inlinee->is_not_osr_compilable(expected_comp_level)) {
      // It's not possible to reach the expected level so fall back to simple.
      expected_comp_level = CompLevel_simple;
    }
    CompLevel max_osr_level = static_cast<CompLevel>(inlinee->highest_osr_comp_level());
    if (max_osr_level >= expected_comp_level) { // fast check to avoid locking in a typical scenario
      nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
      assert(osr_nm == nullptr || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
      if (osr_nm != nullptr && osr_nm->comp_level() != comp_level) {
        // Perform OSR with new nmethod
        return osr_nm;
      }
    }
  }
  return nullptr;
}

// Check if the method can be compiled, change level if necessary
void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
  assert(verify_level(level), "Invalid compilation level requested: %d", level);

  if (level == CompLevel_none) {
    if (mh->has_compiled_code()) {
      // Happens when we switch to the interpreter to profile.
      MutexLocker ml(Compile_lock);
      NoSafepointVerifier nsv;
      if (mh->has_compiled_code()) {
        mh->code()->make_not_used();
      }
      // Deoptimize immediately (we don't have to wait for a compile).
      JavaThread* jt = THREAD;
      RegisterMap map(jt,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = jt->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(jt, fr.id());
    }
    return;
  }

  if (!CompilationModeFlag::disable_intermediate()) {
    // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
    // in the interpreter and then compile with C2 (the transition function will request that,
    // see common()). If the method cannot be compiled with C2 but can still be compiled with
    // C1, compile it with pure C1.
    if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
      if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
        compile(mh, bci, CompLevel_simple, THREAD);
      }
      return;
    }
    if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
      if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
        nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
        if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
          // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
          osr_nm->make_not_entrant();
        }
        compile(mh, bci, CompLevel_simple, THREAD);
      }
      return;
    }
  }
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh(), mh(), bci, level);
    }
    int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
    update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
    bool requires_online_compilation = false;
    if (TrainingData::have_data()) {
      MethodTrainingData* mtd = MethodTrainingData::find(mh);
      if (mtd != nullptr) {
        CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
        if (ctd != nullptr) {
          requires_online_compilation = (ctd->init_deps_left() > 0);
        }
      }
    }
    CompileBroker::compile_method(mh, bci, level, mh, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
  }
}

// update_rate() is called from select_task() while holding a compile queue lock.
void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
  // Skip the update if counters are absent.
  // Can't allocate them since we are holding the compile queue lock.
  if (method->method_counters() == nullptr)  return;

  if (is_old(method)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    method->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = method->invocation_count() + method->backedge_count();
  int delta_e = event_count - method->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      method->set_prev_time(t);
      method->set_prev_event_count(event_count);
      method->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        method->set_rate(0);
      }
    }
  }
}
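
// Worked example: if a method accumulated delta_e = 500 new invocation and
// backedge events over delta_t = 250 ms since the previous sample (and both
// delta_t and delta_s clear TieredRateUpdateMinTime), its rate becomes
// 500 / 250 = 2 events per millisecond.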

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  int64_t delta_t = t - method->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = method->invocation_count() + method->backedge_count();
    int delta_e = event_count - method->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool CompilationPolicy::is_old(const methodHandle& method) {
  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = TieredOldPercentage / 100.0;

  return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
}
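
// Worked example (assuming the typical defaults TieredOldPercentage=1000 and
// Tier3InvocationThreshold=200): k = 10, so a method counts as old once it has
// about 2000 invocations, or 1000 invocations with 20000 combined
// invocation-plus-backedge events; LoopPredicate adds the analogous
// backedge-only trigger.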

double CompilationPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
}
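
// The +1 terms keep the weight nonzero for freshly-seen methods whose rate and
// counters are still zero. Example: rate = 3 events/ms, 1000 invocations and
// 200 backedges give weight = 4 * 1001 * 201 = 804804.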

// Apply heuristics and return true if x should be compiled before y
bool CompilationPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
  assert(!x->is_scc() && !y->is_scc(), "SC tasks are not expected here");
  if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
    return true;
  }
  return false;
}

// Is method profiled enough?
bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
  }
  return false;
}

// Determine if a method is mature.
bool CompilationPolicy::is_mature(MethodData* mdo) {
  if (Arguments::is_compiler_only()) {
    // Always report profiles as immature with -Xcomp
    return false;
  }
  if (mdo != nullptr) {
    // Create the handle only after the null check; dereferencing mdo first
    // would crash on a null MethodData.
    methodHandle mh(Thread::current(), mdo->method());
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();
    double k = ProfileMaturityPercentage / 100.0;
    return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);
  }
  return false;
}

// If a method is old enough and is still in the interpreter, we want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
  if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
    return false;
  }

  if (TrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find(method);
    if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
      return true;
    }
    return false;
  }

  if (is_old(method)) {
    return true;
  }

  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = Tier0ProfilingStartPercentage / 100.0;

  // If the top level compiler is not keeping up, delay profiling.
  if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
    return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == nullptr) {
    Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
  }
  if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
    MethodData* mdo = mh->method_data();
    if (mdo != nullptr) {
      frame last_frame = THREAD->last_frame();
      if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
        int bci = last_frame.interpreter_frame_bci();
        address dp = mdo->bci_to_dp(bci);
        last_frame.interpreter_frame_set_mdp(dp);
      }
    }
  }
}

CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_none);

  if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
    return CompLevel_none;
  }

  bool training_has_profile = (mtd->final_profile() != nullptr);
  if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
    return CompLevel_full_profile;
  }

  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  switch (highest_training_level) {
    case CompLevel_limited_profile:
    case CompLevel_full_profile:
      return CompLevel_limited_profile;
    case CompLevel_simple:
      return CompLevel_simple;
    case CompLevel_none:
      return CompLevel_none;
    default:
      break;
  }

  // Now handle the case of level 4.
  assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
  if (!training_has_profile) {
    // The method was part of a level 4 compile, but we don't have a stored
    // profile, so we need to profile it.
    return CompLevel_full_profile;
  }
  const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
  // If we deopted, then we reprofile
  if (deopt && !is_method_profiled(method)) {
    return CompLevel_full_profile;
  }

  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
  assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
  // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
  if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
    if (method->method_data() == nullptr) {
      create_mdo(method, THREAD);
    }
    return CompLevel_full_optimization;
  }

  // Otherwise go to level 2
  return CompLevel_limited_profile;
}
1344 
1345 
CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_limited_profile);

  // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.

  // But first, check if we have a saved profile.
  bool training_has_profile = (mtd->final_profile() != nullptr);
  if (!training_has_profile) {
    return CompLevel_full_profile;
  }

  assert(training_has_profile, "Have to have a profile to be here");
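  // The stored C2 code can only be used once all of the initialization
  // dependencies recorded at training time are satisfied (init_deps_left() == 0).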
  // Check if the method is ready.
  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
  if (ctd != nullptr && ctd->init_deps_left() == 0) {
    if (method->method_data() == nullptr) {
      create_mdo(method, THREAD);
    }
    return CompLevel_full_optimization;
  }

  // Otherwise stay at the current level.
  return CompLevel_limited_profile;
}

CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_full_profile);

  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  // We have a method at the full profile level, and we also know that it's possibly an important method.
  if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
    // Check if it is adequately profiled.
    if (is_method_profiled(method)) {
      return CompLevel_full_optimization;
    }
  }

  // Otherwise stay at the current level.
  return CompLevel_full_profile;
}

CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(MethodTrainingData::have_data());

  // If there is no training data recorded for this method, bail out.
  if (mtd == nullptr) {
    return cur_level;
  }

  CompLevel next_level = cur_level;
  switch(cur_level) {
    default: break;
    case CompLevel_none:
      next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
      break;
    case CompLevel_limited_profile:
      next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
      break;
    case CompLevel_full_profile:
      next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
      break;
  }

  // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
  if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
    return CompLevel_none;
  }
  if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
    return CompLevel_none;
  }
  return (cur_level != next_level) ? limit_level(next_level) : cur_level;
}

/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 or Graal (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition
 *    to level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2 while the request is still in-queue, because that allows the method to
 *    run much faster without full profiling while C2 is compiling it.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization,
 * different loops are possible.
 *
 */
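
// Illustrative walk-through of pattern (a), assuming default threshold flag
// values (the exact numbers vary by release and command line): a hot method
// starts out interpreted; once its invocation/backedge counters trip the
// tier-3 predicate (e.g. roughly Tier3InvocationThreshold invocations), it is
// compiled by C1 with full profiling (level 3). When the counters collected in
// the profiled code trip the tier-4 predicate (e.g. roughly
// Tier4InvocationThreshold invocations), it is recompiled by C2 (level 4).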

// Common transition function. Given a predicate, determines if a method should transition to another level.
template<typename Predicate>
CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
  CompLevel next_level = cur_level;

  if (force_comp_at_level_simple(method)) {
    next_level = CompLevel_simple;
  } else {
    if (MethodTrainingData::have_data()) {
      MethodTrainingData* mtd = MethodTrainingData::find(method);
      if (mtd == nullptr) {
        // We haven't seen compilations of this method during training. It's either very cold or its behavior changed.
        // Feed it to the standard TF with no profiling delay.
        next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
      } else {
        next_level = trained_transition(method, cur_level, mtd, THREAD);
        if (cur_level == next_level) {
          // trained_transition() is going to return the same level if no startup/warmup optimizations apply.
          // In order to catch possible pathologies due to behavior change we feed the event to the regular
          // TF but with profiling delay.
          next_level = standard_transition<Predicate>(method, cur_level, true /*delay_profiling*/, disable_feedback);
        }
      }
    } else if (is_trivial(method) || method->is_native()) {
      next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
    } else {
      next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
    }
  }
  return (next_level != cur_level) ? limit_level(next_level) : next_level;
}

template<typename Predicate>
CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  CompLevel next_level = cur_level;
  switch(cur_level) {
  default: break;
  case CompLevel_none:
    next_level = transition_from_none<Predicate>(method, cur_level, delay_profiling, disable_feedback);
    break;
  case CompLevel_limited_profile:
    next_level = transition_from_limited_profile<Predicate>(method, cur_level, delay_profiling, disable_feedback);
    break;
  case CompLevel_full_profile:
    next_level = transition_from_full_profile<Predicate>(method, cur_level);
    break;
  }
  return next_level;
}

template<typename Predicate>
CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  precond(cur_level == CompLevel_none);
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();
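  // When profiling should be delayed, scale the transition thresholds up by
  // Tier0ProfileDelayFactor so that the method has to prove itself hotter
  // before we spend time profiling it.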
  double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0;
  // If we were at full profile level, would we switch to full opt?
  if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
    next_level = CompLevel_full_optimization;
  } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) {
    // C1-generated fully profiled code is about 30% slower than the limited profile
    // code that has only invocation and backedge counters. The observation is that
    // if the C2 queue is long enough we can spend too much time in the fully profiled code
    // while waiting for C2 to pick the method from the queue. To alleviate this problem
    // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
    // we choose to compile a limited profile version first and then recompile with full profiling
    // when the load on C2 goes down.
    if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) {
      next_level = CompLevel_limited_profile;
    } else {
      next_level = CompLevel_full_profile;
    }
  }
  return next_level;
}

template<typename Predicate>
CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
  precond(cur_level == CompLevel_full_profile);
  CompLevel next_level = cur_level;
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
      int mdo_i = mdo->invocation_count_delta();
      int mdo_b = mdo->backedge_count_delta();
      if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
        next_level = CompLevel_full_optimization;
      }
    } else {
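      // The MDO says profiling would not collect anything useful for this
      // method, so skip the rest of level 3 and go straight to full optimization.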
      next_level = CompLevel_full_optimization;
    }
  }
  return next_level;
}

template<typename Predicate>
CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  precond(cur_level == CompLevel_limited_profile);
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();
  double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0;
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    if (mdo->would_profile()) {
      if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                              Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                              Predicate::apply_scaled(method, cur_level, i, b, scale))) {
        next_level = CompLevel_full_profile;
      }
    } else {
      next_level = CompLevel_full_optimization;
    }
  } else {
    // If there is no MDO we need to profile.
    if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                            Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                            Predicate::apply_scaled(method, cur_level, i, b, scale))) {
      next_level = CompLevel_full_profile;
    }
  }
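  // If the method is already profiled sufficiently, skip the full-profile
  // step entirely and go straight to C2.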
  if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
    next_level = CompLevel_full_optimization;
  }
  return next_level;
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
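  // Compute the level that the loop predicate would choose, capped by the
  // highest OSR compilation that already exists for this method.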
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
  CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));

  // If the OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != nullptr, "MDO should not be nullptr");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
#if INCLUDE_JVMCI
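  // With the JVMCI compiler, hold back level 4 compilations until AOT class
  // preloading has finished.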
  if (EnableJVMCI && UseJVMCICompiler &&
      next_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
    next_level = cur_level;
  }
#endif
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
  CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, that means we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Handle the invocation event.
void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                      CompLevel level, nmethod* nm, TRAPS) {
  if (should_create_mdo(mh, level)) {
    create_mdo(mh, THREAD);
  }
  CompLevel next_level = call_event(mh, level, THREAD);
  if (next_level != level) {
    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
      compile(mh, InvocationEntryBci, next_level, THREAD);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                     int bci, CompLevel level, nmethod* nm, TRAPS) {
  if (should_create_mdo(mh, level)) {
    create_mdo(mh, THREAD);
  }
  // Check if an MDO should be created for the inlined method.
  if (should_create_mdo(imh, level)) {
    create_mdo(imh, THREAD);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh, level, THREAD);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version.
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, CHECK);
    }

    // Use the loop event as an opportunity to also check if there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != nullptr, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh, cur_level, THREAD);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts.
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an OSR method, just make it not entrant and recompile later if needed.
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee.
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      // Fix up next_level if necessary to avoid deopts.
      if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
        next_level = CompLevel_full_profile;
      }
      if (cur_level != next_level) {
        if (!CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, THREAD);
        }
      }
    } else {
      cur_level = comp_level(mh());
      next_level = call_event(mh, cur_level, THREAD);
      if (next_level != cur_level) {
        if (!CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, THREAD);
        }
      }
    }
  }
}