1 /*
   2  * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotLinkedClassBulkLoader.hpp"
  26 #include "cds/cdsConfig.hpp"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/vmClasses.hpp"
  30 #include "classfile/vmSymbols.hpp"
  31 #include "code/aotCodeCache.hpp"
  32 #include "code/codeCache.hpp"
  33 #include "code/codeHeapState.hpp"
  34 #include "code/dependencyContext.hpp"
  35 #include "compiler/compilationLog.hpp"
  36 #include "compiler/compilationMemoryStatistic.hpp"
  37 #include "compiler/compilationPolicy.hpp"
  38 #include "compiler/compileBroker.hpp"
  39 #include "compiler/compileLog.hpp"
  40 #include "compiler/compilerDefinitions.inline.hpp"
  41 #include "compiler/compilerEvent.hpp"
  42 #include "compiler/compilerOracle.hpp"
  43 #include "compiler/directivesParser.hpp"
  44 #include "compiler/recompilationPolicy.hpp"
  45 #include "gc/shared/memAllocator.hpp"
  46 #include "interpreter/linkResolver.hpp"
  47 #include "jfr/jfrEvents.hpp"
  48 #include "jvm.h"
  49 #include "logging/log.hpp"
  50 #include "logging/logStream.hpp"
  51 #include "memory/allocation.inline.hpp"
  52 #include "memory/resourceArea.hpp"
  53 #include "memory/universe.hpp"
  54 #include "oops/method.inline.hpp"
  55 #include "oops/methodData.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "prims/nativeLookup.hpp"
  59 #include "prims/whitebox.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/escapeBarrier.hpp"
  62 #include "runtime/globals_extension.hpp"
  63 #include "runtime/handles.inline.hpp"
  64 #include "runtime/init.hpp"
  65 #include "runtime/interfaceSupport.inline.hpp"
  66 #include "runtime/java.hpp"
  67 #include "runtime/javaCalls.hpp"
  68 #include "runtime/jniHandles.inline.hpp"
  69 #include "runtime/os.hpp"
  70 #include "runtime/perfData.hpp"
  71 #include "runtime/safepointVerifiers.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/threads.hpp"
  74 #include "runtime/threadSMR.inline.hpp"
  75 #include "runtime/timerTrace.hpp"
  76 #include "runtime/vframe.inline.hpp"
  77 #include "services/management.hpp"
  78 #include "utilities/debug.hpp"
  79 #include "utilities/dtrace.hpp"
  80 #include "utilities/events.hpp"
  81 #include "utilities/formatBuffer.hpp"
  82 #include "utilities/macros.hpp"
  83 #include "utilities/nonblockingQueue.inline.hpp"
  84 #ifdef COMPILER1
  85 #include "c1/c1_Compiler.hpp"
  86 #endif
  87 #ifdef COMPILER2
  88 #include "opto/c2compiler.hpp"
  89 #endif
  90 #if INCLUDE_JVMCI
  91 #include "jvmci/jvmciEnv.hpp"
  92 #include "jvmci/jvmciRuntime.hpp"
  93 #endif
  94 
  95 #ifdef DTRACE_ENABLED
  96 
  97 // Only bother with this argument setup if dtrace is available
  98 
  99 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
 100   {                                                                      \
 101     Symbol* klass_name = (method)->klass_name();                         \
 102     Symbol* name = (method)->name();                                     \
 103     Symbol* signature = (method)->signature();                           \
 104     HOTSPOT_METHOD_COMPILE_BEGIN(                                        \
 105       (char *) comp_name, strlen(comp_name),                             \
 106       (char *) klass_name->bytes(), klass_name->utf8_length(),           \
 107       (char *) name->bytes(), name->utf8_length(),                       \
 108       (char *) signature->bytes(), signature->utf8_length());            \
 109   }
 110 
 111 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)      \
 112   {                                                                      \
 113     Symbol* klass_name = (method)->klass_name();                         \
 114     Symbol* name = (method)->name();                                     \
 115     Symbol* signature = (method)->signature();                           \
 116     HOTSPOT_METHOD_COMPILE_END(                                          \
 117       (char *) comp_name, strlen(comp_name),                             \
 118       (char *) klass_name->bytes(), klass_name->utf8_length(),           \
 119       (char *) name->bytes(), name->utf8_length(),                       \
 120       (char *) signature->bytes(), signature->utf8_length(), (success)); \
 121   }
 122 
 123 #else //  ndef DTRACE_ENABLED
 124 
 125 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
 126 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)
 127 
 128 #endif // ndef DTRACE_ENABLED
 129 
 130 bool CompileBroker::_initialized = false;
 131 volatile bool CompileBroker::_should_block = false;
 132 volatile int  CompileBroker::_print_compilation_warning = 0;
 133 volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
 134 
 135 // The installed compiler(s)
 136 AbstractCompiler* CompileBroker::_compilers[2];
 137 
 138 // The maximum numbers of compiler threads to be determined during startup.
 139 int CompileBroker::_c1_count = 0;
 140 int CompileBroker::_c2_count = 0;
 141 int CompileBroker::_ac_count = 0;
 142 
 143 // An array of compiler names as Java String objects
 144 jobject* CompileBroker::_compiler1_objects = nullptr;
 145 jobject* CompileBroker::_compiler2_objects = nullptr;
 146 jobject* CompileBroker::_ac_objects = nullptr;
 147 
 148 CompileLog** CompileBroker::_compiler1_logs = nullptr;
 149 CompileLog** CompileBroker::_compiler2_logs = nullptr;
 150 CompileLog** CompileBroker::_ac_logs = nullptr;
 151 
 152 // These counters are used to assign an unique ID to each compilation.
 153 volatile jint CompileBroker::_compilation_id     = 0;
 154 volatile jint CompileBroker::_osr_compilation_id = 0;
 155 volatile jint CompileBroker::_native_compilation_id = 0;
 156 
 157 // Performance counters
 158 PerfCounter* CompileBroker::_perf_total_compilation = nullptr;
 159 PerfCounter* CompileBroker::_perf_osr_compilation = nullptr;
 160 PerfCounter* CompileBroker::_perf_standard_compilation = nullptr;
 161 
 162 PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr;
 163 PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr;
 164 PerfCounter* CompileBroker::_perf_total_compile_count = nullptr;
 165 PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr;
 166 PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr;
 167 
 168 PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr;
 169 PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr;
 170 PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr;
 171 PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr;
 172 
 173 PerfStringVariable* CompileBroker::_perf_last_method = nullptr;
 174 PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr;
 175 PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr;
 176 PerfVariable*       CompileBroker::_perf_last_compile_type = nullptr;
 177 PerfVariable*       CompileBroker::_perf_last_compile_size = nullptr;
 178 PerfVariable*       CompileBroker::_perf_last_failed_type = nullptr;
 179 PerfVariable*       CompileBroker::_perf_last_invalidated_type = nullptr;
 180 
 181 // Timers and counters for generating statistics
 182 elapsedTimer CompileBroker::_t_total_compilation;
 183 elapsedTimer CompileBroker::_t_osr_compilation;
 184 elapsedTimer CompileBroker::_t_standard_compilation;
 185 elapsedTimer CompileBroker::_t_invalidated_compilation;
 186 elapsedTimer CompileBroker::_t_bailedout_compilation;
 187 
 188 uint CompileBroker::_total_bailout_count            = 0;
 189 uint CompileBroker::_total_invalidated_count        = 0;
 190 uint CompileBroker::_total_not_entrant_count        = 0;
 191 uint CompileBroker::_total_compile_count            = 0;
 192 uint CompileBroker::_total_osr_compile_count        = 0;
 193 uint CompileBroker::_total_standard_compile_count   = 0;
 194 uint CompileBroker::_total_compiler_stopped_count   = 0;
 195 uint CompileBroker::_total_compiler_restarted_count = 0;
 196 
 197 uint CompileBroker::_sum_osr_bytes_compiled         = 0;
 198 uint CompileBroker::_sum_standard_bytes_compiled    = 0;
 199 uint CompileBroker::_sum_nmethod_size               = 0;
 200 uint CompileBroker::_sum_nmethod_code_size          = 0;
 201 
 202 jlong CompileBroker::_peak_compilation_time        = 0;
 203 
 204 CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
 205 CompilerStatistics CompileBroker::_aot_stats;
 206 CompilerStatistics CompileBroker::_aot_stats_per_level[CompLevel_full_optimization + 1];
 207 
 208 CompileQueue* CompileBroker::_c2_compile_queue     = nullptr;
 209 CompileQueue* CompileBroker::_c1_compile_queue     = nullptr;
 210 CompileQueue* CompileBroker::_ac1_compile_queue    = nullptr;
 211 CompileQueue* CompileBroker::_ac2_compile_queue    = nullptr;
 212 
 213 bool compileBroker_init() {
 214   if (LogEvents) {
 215     CompilationLog::init();
 216   }
 217 
 218   // init directives stack, adding default directive
 219   DirectivesStack::init();
 220 
 221   if (DirectivesParser::has_file()) {
 222     return DirectivesParser::parse_from_flag();
 223   } else if (CompilerDirectivesPrint) {
 224     // Print default directive even when no other was added
 225     DirectivesStack::print(tty);
 226   }
 227 
 228   return true;
 229 }
 230 
 231 CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
 232   CompilerThread* thread = CompilerThread::current();
 233   thread->set_task(task);
 234   CompileLog*     log  = thread->log();
 235   thread->timeout()->arm();
 236   if (log != nullptr && !task->is_unloaded())  task->log_task_start(log);
 237 }
 238 
 239 CompileTaskWrapper::~CompileTaskWrapper() {
 240   CompilerThread* thread = CompilerThread::current();
 241   CompileTask* task = thread->task();
 242   CompileLog*  log  = thread->log();
 243   AbstractCompiler* comp = thread->compiler();
 244   if (log != nullptr && !task->is_unloaded())  task->log_task_done(log);
 245   thread->set_task(nullptr);
 246   thread->set_env(nullptr);
 247   thread->timeout()->disarm();
 248   if (task->is_blocking()) {
 249     bool free_task = false;
 250     {
 251       MutexLocker notifier(thread, CompileTaskWait_lock);
 252       task->mark_complete();
 253 #if INCLUDE_JVMCI
 254       if (comp->is_jvmci()) {
 255         if (!task->has_waiter()) {
 256           // The waiting thread timed out and thus did not delete the task.
 257           free_task = true;
 258         }
 259         task->set_blocking_jvmci_compile_state(nullptr);
 260       }
 261 #endif
 262       if (!free_task) {
 263         // Notify the waiting thread that the compilation has completed
 264         // so that it can free the task.
 265         CompileTaskWait_lock->notify_all();
 266       }
 267     }
 268     if (free_task) {
 269       // The task can only be deleted once the task lock is released.
 270       delete task;
 271     }
 272   } else {
 273     task->mark_complete();
 274 
 275     // By convention, the compiling thread is responsible for deleting
 276     // a non-blocking CompileTask.
 277     delete task;
 278   }
 279 }
 280 
 281 /**
 282  * Check if a CompilerThread can be removed and update count if requested.
 283  */
 284 bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
 285   assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
 286   if (!ReduceNumberOfCompilerThreads) return false;
 287 
 288   if (RecompilationPolicy::have_recompilation_work()) return false;
 289 
 290   AbstractCompiler *compiler = ct->compiler();
 291   int compiler_count = compiler->num_compiler_threads();
 292   bool c1 = compiler->is_c1();
 293 
 294   // Keep at least 1 compiler thread of each type.
 295   if (compiler_count < 2) return false;
 296 
 297   // Keep thread alive for at least some time.
 298   if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;
 299 
 300 #if INCLUDE_JVMCI
 301   if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
 302     // Handles for JVMCI thread objects may get released concurrently.
 303     if (do_it) {
 304       assert(CompileThread_lock->owner() == ct, "must be holding lock");
 305     } else {
 306       // Skip check if it's the last thread and let caller check again.
 307       return true;
 308     }
 309   }
 310 #endif
 311 
 312   // We only allow the last compiler thread of each type to get removed.
 313   jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
 314                              : compiler2_object(compiler_count - 1);
 315   if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
 316     if (do_it) {
 317       assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
 318       compiler->set_num_compiler_threads(compiler_count - 1);
 319 #if INCLUDE_JVMCI
 320       if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
 321         // Old j.l.Thread object can die when no longer referenced elsewhere.
 322         JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
 323         _compiler2_objects[compiler_count - 1] = nullptr;
 324       }
 325 #endif
 326     }
 327     return true;
 328   }
 329   return false;
 330 }
 331 
 332 /**
 333  * Add a CompileTask to a CompileQueue.
 334  */
 335 void CompileQueue::add(CompileTask* task) {
 336   assert(_lock->owned_by_self(), "must own lock");
 337 
 338   task->set_next(nullptr);
 339   task->set_prev(nullptr);
 340 
 341   if (_last == nullptr) {
 342     // The compile queue is empty.
 343     assert(_first == nullptr, "queue is empty");
 344     _first = task;
 345     _last = task;
 346   } else {
 347     // Append the task to the queue.
 348     assert(_last->next() == nullptr, "not last");
 349     _last->set_next(task);
 350     task->set_prev(_last);
 351     _last = task;
 352   }
 353   ++_size;
 354   ++_total_added;
 355   if (_size > _peak_size) {
 356     _peak_size = _size;
 357   }
 358 
 359   // Mark the method as being in the compile queue.
 360   task->method()->set_queued_for_compilation();
 361 
 362   task->mark_queued(os::elapsed_counter());
 363 
 364   if (CIPrintCompileQueue) {
 365     print_tty();
 366   }
 367 
 368   if (LogCompilation && xtty != nullptr) {
 369     task->log_task_queued();
 370   }
 371 
 372   if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) {
 373     CompileTrainingData* ctd = CompileTrainingData::make(task);
 374     if (ctd != nullptr) {
 375       task->set_training_data(ctd);
 376     }
 377   }
 378 
 379   // Notify CompilerThreads that a task is available.
 380   _lock->notify_all();
 381 }
 382 
 383 void CompileQueue::add_pending(CompileTask* task) {
 384   assert(_lock->owned_by_self() == false, "must NOT own lock");
 385   assert(UseLockFreeCompileQueues, "");
 386   task->method()->set_queued_for_compilation();
 387   _queue.push(*task);
 388   // FIXME: additional coordination needed? e.g., is it possible for compiler thread to block w/o processing pending tasks?
 389   if (is_empty()) {
 390     MutexLocker ml(_lock);
 391     _lock->notify_all();
 392   }
 393 }
 394 
 395 static bool process_pending(CompileTask* task) {
 396 //  guarantee(task->method()->queued_for_compilation(), "");
 397   if (task->is_unloaded()) {
 398     return true; // unloaded
 399   }
 400   task->method()->set_queued_for_compilation(); // FIXME
 401   if (task->method()->pending_queue_processed()) {
 402     return true; // already queued
 403   }
 404   // Mark the method as being in the compile queue.
 405   task->method()->set_pending_queue_processed();
 406   if (CompileBroker::compilation_is_complete(task->method(), task->osr_bci(), task->comp_level(),
 407                                              task->requires_online_compilation(), task->compile_reason())) {
 408     return true; // already compiled
 409   }
 410   return false; // active
 411 }
 412 
 413 void CompileQueue::transfer_pending() {
 414   assert(_lock->owned_by_self(), "must own lock");
 415 
 416   CompileTask* task;
 417   while ((task = _queue.pop()) != nullptr) {
 418     bool is_stale = process_pending(task);
 419     if (is_stale) {
 420       task->set_next(_first_stale);
 421       task->set_prev(nullptr);
 422       _first_stale = task;
 423     } else {
 424       add(task);
 425     }
 426   }
 427 }
 428 
 429 /**
 430  * Empties compilation queue by deleting all compilation tasks.
 431  * Furthermore, the method wakes up all threads that are waiting
 432  * on a compilation task to finish. This can happen if background
 433  * compilation is disabled.
 434  */
 435 void CompileQueue::delete_all() {
 436   MutexLocker mu(_lock);
 437   transfer_pending();
 438 
 439   CompileTask* current = _first;
 440 
 441   // Iterate over all tasks in the compile queue
 442   while (current != nullptr) {
 443     CompileTask* next = current->next();
 444     if (!current->is_blocking()) {
 445       // Non-blocking task. No one is waiting for it, delete it now.
 446       delete current;
 447     } else {
 448       // Blocking task. By convention, it is the waiters responsibility
 449       // to delete the task. We cannot delete it here, because we do not
 450       // coordinate with waiters. We will notify the waiters later.
 451     }
 452     current = next;
 453   }
 454   _first = nullptr;
 455   _last = nullptr;
 456 
 457   // Wake up all blocking task waiters to deal with remaining blocking
 458   // tasks. This is not a performance sensitive path, so we do this
 459   // unconditionally to simplify coding/testing.
 460   {
 461     MonitorLocker ml(Thread::current(), CompileTaskWait_lock);
 462     ml.notify_all();
 463   }
 464 
 465   // Wake up all threads that block on the queue.
 466   _lock->notify_all();
 467 }
 468 
 469 /**
 470  * Get the next CompileTask from a CompileQueue
 471  */
 472 CompileTask* CompileQueue::get(CompilerThread* thread) {
 473   // save methods from RedefineClasses across safepoint
 474   // across compile queue lock below.
 475   methodHandle save_method;
 476 
 477   MonitorLocker locker(_lock);
 478   transfer_pending();
 479 
 480   RecompilationPolicy::sample_load_average();
 481 
 482   // If _first is null we have no more compile jobs. There are two reasons for
 483   // having no compile jobs: First, we compiled everything we wanted. Second,
 484   // we ran out of code cache so compilation has been disabled. In the latter
 485   // case we perform code cache sweeps to free memory such that we can re-enable
 486   // compilation.
 487   while (_first == nullptr) {
 488     // Exit loop if compilation is disabled forever
 489     if (CompileBroker::is_compilation_disabled_forever()) {
 490       return nullptr;
 491     }
 492 
 493     AbstractCompiler* compiler = thread->compiler();
 494     guarantee(compiler != nullptr, "Compiler object must exist");
 495     compiler->on_empty_queue(this, thread);
 496     if (_first != nullptr) {
 497       // The call to on_empty_queue may have temporarily unlocked the MCQ lock
 498       // so check again whether any tasks were added to the queue.
 499       break;
 500     }
 501 
 502     // If we have added stale tasks, there might be waiters that want
 503     // the notification these tasks have failed. Normally, this would
 504     // be done by a compiler thread that would perform the purge at
 505     // the end of some compilation. But, if compile queue is empty,
 506     // there is no guarantee compilers would run and do the purge.
 507     // Do the purge here and now to unblock the waiters.
 508     // Perform this until we run out of stale tasks.
 509     while (_first_stale != nullptr) {
 510       purge_stale_tasks();
 511     }
 512     if (_first != nullptr) {
 513       // Purge stale tasks may have transferred some new tasks,
 514       // so check again.
 515       break;
 516     }
 517 
 518     // If there are no compilation tasks and we can compile new jobs
 519     // (i.e., there is enough free space in the code cache) there is
 520     // no need to invoke the GC.
 521     // We need a timed wait here, since compiler threads can exit if compilation
 522     // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
 523     // is not critical and we do not want idle compiler threads to wake up too often.
 524     locker.wait(5*1000);
 525 
 526     transfer_pending(); // reacquired lock
 527 
 528     if (RecompilationPolicy::have_recompilation_work()) return nullptr;
 529 
 530     if (UseDynamicNumberOfCompilerThreads && _first == nullptr) {
 531       // Still nothing to compile. Give caller a chance to stop this thread.
 532       if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr;
 533     }
 534   }
 535 
 536   if (CompileBroker::is_compilation_disabled_forever()) {
 537     return nullptr;
 538   }
 539 
 540   CompileTask* task;
 541   {
 542     NoSafepointVerifier nsv;
 543     task = CompilationPolicy::select_task(this, thread);
 544     if (task != nullptr) {
 545       task = task->select_for_compilation();
 546     }
 547   }
 548 
 549   if (task != nullptr) {
 550     // Save method pointers across unlock safepoint.  The task is removed from
 551     // the compilation queue, which is walked during RedefineClasses.
 552     Thread* thread = Thread::current();
 553     save_method = methodHandle(thread, task->method());
 554 
 555     remove(task);
 556   }
 557   purge_stale_tasks(); // may temporarily release MCQ lock
 558   return task;
 559 }
 560 
 561 // Clean & deallocate stale compile tasks.
 562 // Temporarily releases MethodCompileQueue lock.
 563 void CompileQueue::purge_stale_tasks() {
 564   assert(_lock->owned_by_self(), "must own lock");
 565   if (_first_stale != nullptr) {
 566     // Stale tasks are purged when MCQ lock is released,
 567     // but _first_stale updates are protected by MCQ lock.
 568     // Once task processing starts and MCQ lock is released,
 569     // other compiler threads can reuse _first_stale.
 570     CompileTask* head = _first_stale;
 571     _first_stale = nullptr;
 572     {
 573       MutexUnlocker ul(_lock);
 574       for (CompileTask* task = head; task != nullptr; ) {
 575         CompileTask* next_task = task->next();
 576         task->set_next(nullptr);
 577         CompileTaskWrapper ctw(task); // Frees the task
 578         task->set_failure_reason("stale task");
 579         task = next_task;
 580       }
 581     }
 582     transfer_pending(); // transfer pending after reacquiring MCQ lock
 583   }
 584 }
 585 
 586 void CompileQueue::remove(CompileTask* task) {
 587   assert(_lock->owned_by_self(), "must own lock");
 588   if (task->prev() != nullptr) {
 589     task->prev()->set_next(task->next());
 590   } else {
 591     // max is the first element
 592     assert(task == _first, "Sanity");
 593     _first = task->next();
 594   }
 595 
 596   if (task->next() != nullptr) {
 597     task->next()->set_prev(task->prev());
 598   } else {
 599     // max is the last element
 600     assert(task == _last, "Sanity");
 601     _last = task->prev();
 602   }
 603   task->set_next(nullptr);
 604   task->set_prev(nullptr);
 605   --_size;
 606   ++_total_removed;
 607 }
 608 
 609 void CompileQueue::remove_and_mark_stale(CompileTask* task) {
 610   assert(_lock->owned_by_self(), "must own lock");
 611   remove(task);
 612 
 613   // Enqueue the task for reclamation (should be done outside MCQ lock)
 614   task->set_next(_first_stale);
 615   task->set_prev(nullptr);
 616   _first_stale = task;
 617 }
 618 
 619 // methods in the compile queue need to be marked as used on the stack
 620 // so that they don't get reclaimed by Redefine Classes
 621 void CompileQueue::mark_on_stack() {
 622   for (CompileTask* task = _first; task != nullptr; task = task->next()) {
 623     task->mark_on_stack();
 624   }
 625   for (CompileTask* task = _queue.first(); !_queue.is_end(task); task = task->next()) {
 626     assert(task != nullptr, "");
 627     task->mark_on_stack();
 628   }
 629 }
 630 
 631 
 632 CompileQueue* CompileBroker::compile_queue(int comp_level, bool is_aot) {
 633   if (is_c2_compile(comp_level)) return ((is_aot  && (_ac_count > 0)) ? _ac2_compile_queue : _c2_compile_queue);
 634   if (is_c1_compile(comp_level)) return ((is_aot && (_ac_count > 0)) ? _ac1_compile_queue : _c1_compile_queue);
 635   return nullptr;
 636 }
 637 
 638 CompileQueue* CompileBroker::c1_compile_queue() {
 639   return _c1_compile_queue;
 640 }
 641 
 642 CompileQueue* CompileBroker::c2_compile_queue() {
 643   return _c2_compile_queue;
 644 }
 645 
 646 void CompileBroker::print_compile_queues(outputStream* st) {
 647   st->print_cr("Current compiles: ");
 648 
 649   char buf[2000];
 650   int buflen = sizeof(buf);
 651   Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);
 652 
 653   st->cr();
 654   if (_c1_compile_queue != nullptr) {
 655     _c1_compile_queue->print(st);
 656   }
 657   if (_c2_compile_queue != nullptr) {
 658     _c2_compile_queue->print(st);
 659   }
 660   if (_ac1_compile_queue != nullptr) {
 661     _ac1_compile_queue->print(st);
 662   }
 663   if (_ac2_compile_queue != nullptr) {
 664     _ac2_compile_queue->print(st);
 665   }
 666 }
 667 
 668 void CompileQueue::print(outputStream* st) {
 669   assert_locked_or_safepoint(_lock);
 670   st->print_cr("%s:", name());
 671   CompileTask* task = _first;
 672   if (task == nullptr) {
 673     st->print_cr("Empty");
 674   } else {
 675     while (task != nullptr) {
 676       task->print(st, nullptr, true, true);
 677       task = task->next();
 678     }
 679   }
 680   st->cr();
 681 }
 682 
 683 void CompileQueue::print_tty() {
 684   stringStream ss;
 685   // Dump the compile queue into a buffer before locking the tty
 686   print(&ss);
 687   {
 688     ttyLocker ttyl;
 689     tty->print("%s", ss.freeze());
 690   }
 691 }
 692 
 693 CompilerCounters::CompilerCounters() {
 694   _current_method[0] = '\0';
 695   _compile_type = CompileBroker::no_compile;
 696 }
 697 
 698 #if INCLUDE_JFR && COMPILER2_OR_JVMCI
 699 // It appends new compiler phase names to growable array phase_names(a new CompilerPhaseType mapping
 700 // in compiler/compilerEvent.cpp) and registers it with its serializer.
 701 //
 702 // c2 uses explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
 703 // so if c2 is used, it should be always registered first.
 704 // This function is called during vm initialization.
 705 static void register_jfr_phasetype_serializer(CompilerType compiler_type) {
 706   ResourceMark rm;
 707   static bool first_registration = true;
 708   if (compiler_type == compiler_jvmci) {
 709     CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false);
 710     first_registration = false;
 711 #ifdef COMPILER2
 712   } else if (compiler_type == compiler_c2) {
 713     assert(first_registration, "invariant"); // c2 must be registered first.
 714     for (int i = 0; i < PHASE_NUM_TYPES; i++) {
 715       const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i);
 716       CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false);
 717     }
 718     first_registration = false;
 719 #endif // COMPILER2
 720   }
 721 }
 722 #endif // INCLUDE_JFR && COMPILER2_OR_JVMCI
 723 
 724 // ------------------------------------------------------------------
 725 // CompileBroker::compilation_init
 726 //
 727 // Initialize the Compilation object
 728 void CompileBroker::compilation_init(JavaThread* THREAD) {
 729   // No need to initialize compilation system if we do not use it.
 730   if (!UseCompiler) {
 731     return;
 732   }
 733   // Set the interface to the current compiler(s).
 734   _c1_count = CompilationPolicy::c1_count();
 735   _c2_count = CompilationPolicy::c2_count();
 736   _ac_count = CompilationPolicy::ac_count();
 737 
 738 #if INCLUDE_JVMCI
 739   if (EnableJVMCI) {
 740     // This is creating a JVMCICompiler singleton.
 741     JVMCICompiler* jvmci = new JVMCICompiler();
 742 
 743     if (UseJVMCICompiler) {
 744       _compilers[1] = jvmci;
 745       if (FLAG_IS_DEFAULT(JVMCIThreads)) {
 746         if (BootstrapJVMCI) {
 747           // JVMCI will bootstrap so give it more threads
 748           _c2_count = MIN2(32, os::active_processor_count());
 749         }
 750       } else {
 751         _c2_count = JVMCIThreads;
 752       }
 753       if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
 754       } else {
 755 #ifdef COMPILER1
 756         _c1_count = JVMCIHostThreads;
 757 #endif // COMPILER1
 758       }
 759     }
 760   }
 761 #endif // INCLUDE_JVMCI
 762 
 763 #ifdef COMPILER1
 764   if (_c1_count > 0) {
 765     _compilers[0] = new Compiler();
 766   }
 767 #endif // COMPILER1
 768 
 769 #ifdef COMPILER2
 770   if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
 771     if (_c2_count > 0) {
 772       _compilers[1] = new C2Compiler();
 773       // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit.
 774       // idToPhase mapping for c2 is in opto/phasetype.hpp
 775       JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);)
 776     }
 777   }
 778 #endif // COMPILER2
 779 
 780 #if INCLUDE_JVMCI
 781    // Register after c2 registration.
 782    // JVMCI CompilerPhaseType idToPhase mapping is dynamic.
 783    if (EnableJVMCI) {
 784      JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);)
 785    }
 786 #endif // INCLUDE_JVMCI
 787 
 788   if (CompilerOracle::should_collect_memstat()) {
 789     CompilationMemoryStatistic::initialize();
 790   }
 791 
 792   // Start the compiler thread(s)
 793   init_compiler_threads();
 794   // totalTime performance counter is always created as it is required
 795   // by the implementation of java.lang.management.CompilationMXBean.
 796   {
 797     // Ensure OOM leads to vm_exit_during_initialization.
 798     EXCEPTION_MARK;
 799     _perf_total_compilation =
 800                  PerfDataManager::create_counter(JAVA_CI, "totalTime",
 801                                                  PerfData::U_Ticks, CHECK);
 802   }
 803 
 804   if (UsePerfData) {
 805 
 806     EXCEPTION_MARK;
 807 
 808     // create the jvmstat performance counters
 809     _perf_osr_compilation =
 810                  PerfDataManager::create_counter(SUN_CI, "osrTime",
 811                                                  PerfData::U_Ticks, CHECK);
 812 
 813     _perf_standard_compilation =
 814                  PerfDataManager::create_counter(SUN_CI, "standardTime",
 815                                                  PerfData::U_Ticks, CHECK);
 816 
 817     _perf_total_bailout_count =
 818                  PerfDataManager::create_counter(SUN_CI, "totalBailouts",
 819                                                  PerfData::U_Events, CHECK);
 820 
 821     _perf_total_invalidated_count =
 822                  PerfDataManager::create_counter(SUN_CI, "totalInvalidates",
 823                                                  PerfData::U_Events, CHECK);
 824 
 825     _perf_total_compile_count =
 826                  PerfDataManager::create_counter(SUN_CI, "totalCompiles",
 827                                                  PerfData::U_Events, CHECK);
 828     _perf_total_osr_compile_count =
 829                  PerfDataManager::create_counter(SUN_CI, "osrCompiles",
 830                                                  PerfData::U_Events, CHECK);
 831 
 832     _perf_total_standard_compile_count =
 833                  PerfDataManager::create_counter(SUN_CI, "standardCompiles",
 834                                                  PerfData::U_Events, CHECK);
 835 
 836     _perf_sum_osr_bytes_compiled =
 837                  PerfDataManager::create_counter(SUN_CI, "osrBytes",
 838                                                  PerfData::U_Bytes, CHECK);
 839 
 840     _perf_sum_standard_bytes_compiled =
 841                  PerfDataManager::create_counter(SUN_CI, "standardBytes",
 842                                                  PerfData::U_Bytes, CHECK);
 843 
 844     _perf_sum_nmethod_size =
 845                  PerfDataManager::create_counter(SUN_CI, "nmethodSize",
 846                                                  PerfData::U_Bytes, CHECK);
 847 
 848     _perf_sum_nmethod_code_size =
 849                  PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize",
 850                                                  PerfData::U_Bytes, CHECK);
 851 
 852     _perf_last_method =
 853                  PerfDataManager::create_string_variable(SUN_CI, "lastMethod",
 854                                        CompilerCounters::cmname_buffer_length,
 855                                        "", CHECK);
 856 
 857     _perf_last_failed_method =
 858             PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod",
 859                                        CompilerCounters::cmname_buffer_length,
 860                                        "", CHECK);
 861 
 862     _perf_last_invalidated_method =
 863         PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod",
 864                                      CompilerCounters::cmname_buffer_length,
 865                                      "", CHECK);
 866 
 867     _perf_last_compile_type =
 868              PerfDataManager::create_variable(SUN_CI, "lastType",
 869                                               PerfData::U_None,
 870                                               (jlong)CompileBroker::no_compile,
 871                                               CHECK);
 872 
 873     _perf_last_compile_size =
 874              PerfDataManager::create_variable(SUN_CI, "lastSize",
 875                                               PerfData::U_Bytes,
 876                                               (jlong)CompileBroker::no_compile,
 877                                               CHECK);
 878 
 879 
 880     _perf_last_failed_type =
 881              PerfDataManager::create_variable(SUN_CI, "lastFailedType",
 882                                               PerfData::U_None,
 883                                               (jlong)CompileBroker::no_compile,
 884                                               CHECK);
 885 
 886     _perf_last_invalidated_type =
 887          PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType",
 888                                           PerfData::U_None,
 889                                           (jlong)CompileBroker::no_compile,
 890                                           CHECK);
 891   }
 892 
 893   log_info(aot, codecache, init)("CompileBroker is initialized");
 894   _initialized = true;
 895 }
 896 
 897 Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
 898   Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK_NH);
 899   return thread_oop;
 900 }
 901 
 902 void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) {
 903   CompilationPolicy::replay_training_at_init_loop(thread);
 904 }
 905 
 906 #if defined(ASSERT) && COMPILER2_OR_JVMCI
 907 // Entry for DeoptimizeObjectsALotThread. The threads are started in
 908 // CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled
 909 void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
 910     DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread);
 911     bool enter_single_loop;
 912     {
 913       MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag);
 914       static int single_thread_count = 0;
 915       enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle;
 916     }
 917     if (enter_single_loop) {
 918       dt->deoptimize_objects_alot_loop_single();
 919     } else {
 920       dt->deoptimize_objects_alot_loop_all();
 921     }
 922   }
 923 
 924 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
 925 // barrier targets a single thread which is selected round robin.
 926 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() {
 927   HandleMark hm(this);
 928   while (true) {
 929     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) {
 930       { // Begin new scope for escape barrier
 931         HandleMarkCleaner hmc(this);
 932         ResourceMark rm(this);
 933         EscapeBarrier eb(true, this, deoptee_thread);
 934         eb.deoptimize_objects(100);
 935       }
 936       // Now sleep after the escape barriers destructor resumed deoptee_thread.
 937       sleep(DeoptimizeObjectsALotInterval);
 938     }
 939   }
 940 }
 941 
 942 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
 943 // barrier targets all java threads in the vm at once.
 944 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {
 945   HandleMark hm(this);
 946   while (true) {
 947     { // Begin new scope for escape barrier
 948       HandleMarkCleaner hmc(this);
 949       ResourceMark rm(this);
 950       EscapeBarrier eb(true, this);
 951       eb.deoptimize_objects_all_threads();
 952     }
 953     // Now sleep after the escape barriers destructor resumed the java threads.
 954     sleep(DeoptimizeObjectsALotInterval);
 955   }
 956 }
 957 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI
 958 
 959 
 960 JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
 961   Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));
 962 
 963   if (java_lang_Thread::thread(thread_oop()) != nullptr) {
 964     assert(type == compiler_t, "should only happen with reused compiler threads");
 965     // The compiler thread hasn't actually exited yet so don't try to reuse it
 966     return nullptr;
 967   }
 968 
 969   JavaThread* new_thread = nullptr;
 970   switch (type) {
 971     case compiler_t:
 972       assert(comp != nullptr, "Compiler instance missing.");
 973       if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
 974         CompilerCounters* counters = new CompilerCounters();
 975         new_thread = new CompilerThread(queue, counters);
 976       }
 977       break;
 978 #if defined(ASSERT) && COMPILER2_OR_JVMCI
 979     case deoptimizer_t:
 980       new_thread = new DeoptimizeObjectsALotThread();
 981       break;
 982 #endif // ASSERT
 983     case training_replay_t:
 984       new_thread = new TrainingReplayThread();
 985       break;
 986     default:
 987       ShouldNotReachHere();
 988   }
 989 
 990   // At this point the new CompilerThread data-races with this startup
 991   // thread (which is the main thread and NOT the VM thread).
 992   // This means Java bytecodes being executed at startup can
 993   // queue compile jobs which will run at whatever default priority the
 994   // newly created CompilerThread runs at.
 995 
 996 
 997   // At this point it may be possible that no osthread was created for the
 998   // JavaThread due to lack of resources. We will handle that failure below.
 999   // Also check new_thread so that static analysis is happy.
1000   if (new_thread != nullptr && new_thread->osthread() != nullptr) {
1001 
1002     if (type == compiler_t) {
1003       CompilerThread::cast(new_thread)->set_compiler(comp);
1004     }
1005 
1006     // Note that we cannot call os::set_priority because it expects Java
1007     // priorities and we are *explicitly* using OS priorities so that it's
1008     // possible to set the compiler thread priority higher than any Java
1009     // thread.
1010 
1011     int native_prio = CompilerThreadPriority;
1012     if (native_prio == -1) {
1013       if (UseCriticalCompilerThreadPriority) {
1014         native_prio = os::java_to_os_priority[CriticalPriority];
1015       } else {
1016         native_prio = os::java_to_os_priority[NearMaxPriority];
1017       }
1018     }
1019     os::set_native_priority(new_thread, native_prio);
1020 
1021     // Note that this only sets the JavaThread _priority field, which by
1022     // definition is limited to Java priorities and not OS priorities.
1023     JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority);
1024 
1025   } else { // osthread initialization failure
1026     if (UseDynamicNumberOfCompilerThreads && type == compiler_t
1027         && comp->num_compiler_threads() > 0) {
1028       // The new thread is not known to Thread-SMR yet so we can just delete.
1029       delete new_thread;
1030       return nullptr;
1031     } else {
1032       vm_exit_during_initialization("java.lang.OutOfMemoryError",
1033                                     os::native_thread_creation_failed_msg());
1034     }
1035   }
1036 
1037   os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
1038 
1039   return new_thread;
1040 }
1041 
1042 static bool trace_compiler_threads() {
1043   LogTarget(Debug, jit, thread) lt;
1044   return TraceCompilerThreads || lt.is_enabled();
1045 }
1046 
1047 static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) {
1048   char name_buffer[256];
1049   os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i);
1050   Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL);
1051   return JNIHandles::make_global(thread_oop);
1052 }
1053 
1054 static void print_compiler_threads(stringStream& msg) {
1055   if (TraceCompilerThreads) {
1056     tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string());
1057   }
1058   LogTarget(Debug, jit, thread) lt;
1059   if (lt.is_enabled()) {
1060     LogStream ls(lt);
1061     ls.print_cr("%s", msg.as_string());
1062   }
1063 }
1064 
1065 static void print_compiler_thread(JavaThread *ct) {
1066   if (trace_compiler_threads()) {
1067     ResourceMark rm;
1068     ThreadsListHandle tlh;  // name() depends on the TLH.
1069     assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
1070     stringStream msg;
1071     msg.print("Added initial compiler thread %s", ct->name());
1072     print_compiler_threads(msg);
1073   }
1074 }
1075 
1076 void CompileBroker::init_compiler_threads() {
1077   // Ensure any exceptions lead to vm_exit_during_initialization.
1078   EXCEPTION_MARK;
1079 #if !defined(ZERO)
1080   assert(_c2_count > 0 || _c1_count > 0, "No compilers?");
1081 #endif // !ZERO
1082   // Initialize the compilation queue
1083   if (_c2_count > 0) {
1084     const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
1085     _c2_compile_queue  = new CompileQueue(name, MethodCompileQueueC2_lock);
1086     _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler);
1087     _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler);
1088   }
1089   if (_c1_count > 0) {
1090     _c1_compile_queue  = new CompileQueue("C1 compile queue", MethodCompileQueueC1_lock);
1091     _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler);
1092     _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler);
1093   }
1094 
1095   if (_ac_count > 0) {
1096     if (_c1_count > 0) { // C1 is present
1097       _ac1_compile_queue  = new CompileQueue("C1 AOT code compile queue", MethodCompileQueueSC1_lock);
1098     }
1099     if (_c2_count > 0) { // C2 is present
1100       _ac2_compile_queue  = new CompileQueue("C2 AOT code compile queue", MethodCompileQueueSC2_lock);
1101     }
1102     _ac_objects = NEW_C_HEAP_ARRAY(jobject, _ac_count, mtCompiler);
1103     _ac_logs = NEW_C_HEAP_ARRAY(CompileLog*, _ac_count, mtCompiler);
1104   }
1105   char name_buffer[256];
1106 
1107   for (int i = 0; i < _c2_count; i++) {
1108     // Create a name for our thread.
1109     jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK);
1110     _compiler2_objects[i] = thread_handle;
1111     _compiler2_logs[i] = nullptr;
1112 
1113     if (!UseDynamicNumberOfCompilerThreads || i == 0) {
1114       JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD);
1115       assert(ct != nullptr, "should have been handled for initial thread");
1116       _compilers[1]->set_num_compiler_threads(i + 1);
1117       print_compiler_thread(ct);
1118     }
1119   }
1120 
1121   for (int i = 0; i < _c1_count; i++) {
1122     // Create a name for our thread.
1123     jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK);
1124     _compiler1_objects[i] = thread_handle;
1125     _compiler1_logs[i] = nullptr;
1126 
1127     if (!UseDynamicNumberOfCompilerThreads || i == 0) {
1128       JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD);
1129       assert(ct != nullptr, "should have been handled for initial thread");
1130       _compilers[0]->set_num_compiler_threads(i + 1);
1131       print_compiler_thread(ct);
1132     }
1133   }
1134 
1135   if (_ac_count > 0) {
1136     int i = 0;
1137     if (_c1_count > 0) { // C1 is present
1138       os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 1);
1139       Handle thread_oop = create_thread_oop(name_buffer, CHECK);
1140       jobject thread_handle = JNIHandles::make_global(thread_oop);
1141       _ac_objects[i] = thread_handle;
1142       _ac_logs[i] = nullptr;
1143       i++;
1144 
1145       JavaThread *ct = make_thread(compiler_t, thread_handle, _ac1_compile_queue, _compilers[0], THREAD);
1146       assert(ct != nullptr, "should have been handled for initial thread");
1147       print_compiler_thread(ct);
1148     }
1149     if (_c2_count > 0) { // C2 is present
1150       os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 2);
1151       Handle thread_oop = create_thread_oop(name_buffer, CHECK);
1152       jobject thread_handle = JNIHandles::make_global(thread_oop);
1153       _ac_objects[i] = thread_handle;
1154       _ac_logs[i] = nullptr;
1155 
1156       JavaThread *ct = make_thread(compiler_t, thread_handle, _ac2_compile_queue, _compilers[1], THREAD);
1157       assert(ct != nullptr, "should have been handled for initial thread");
1158       print_compiler_thread(ct);
1159     }
1160   }
1161 
1162   if (UsePerfData) {
1163     PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK);
1164   }
1165 
1166 #if defined(ASSERT) && COMPILER2_OR_JVMCI
1167   if (DeoptimizeObjectsALot) {
1168     // Initialize and start the object deoptimizer threads
1169     const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll;
1170     for (int count = 0; count < total_count; count++) {
1171       Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK);
1172       jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
1173       make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD);
1174     }
1175   }
1176 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI
1177 }
1178 
1179 void CompileBroker::init_training_replay() {
1180   // Ensure any exceptions lead to vm_exit_during_initialization.
1181   EXCEPTION_MARK;
1182   if (TrainingData::have_data()) {
1183     Handle thread_oop = create_thread_oop("Training replay thread", CHECK);
1184     jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
1185     make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD);
1186   }
1187 }
1188 
1189 void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {
1190 
1191   int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0;
1192   const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4;
1193 
1194   // Quick check if we already have enough compiler threads without taking the lock.
1195   // Numbers may change concurrently, so we read them again after we have the lock.
1196   if (_c2_compile_queue != nullptr) {
1197     old_c2_count = get_c2_thread_count();
1198     new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread);
1199   }
1200   if (_c1_compile_queue != nullptr) {
1201     old_c1_count = get_c1_thread_count();
1202     new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread);
1203   }
1204   if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return;
1205 
1206   // Now, we do the more expensive operations.
1207   size_t free_memory = 0;
1208   // Return value ignored - defaulting to 0 on failure.
1209   (void)os::free_memory(free_memory);
1210   // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
1211   size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
1212          available_cc_p  = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);
1213 
1214   // Only attempt to start additional threads if the lock is free.
1215   if (!CompileThread_lock->try_lock()) return;
1216 
1217   if (_c2_compile_queue != nullptr) {
1218     old_c2_count = get_c2_thread_count();
1219     new_c2_count = MIN4(_c2_count,
1220         _c2_compile_queue->size() / c2_tasks_per_thread,
1221         (int)(free_memory / (200*M)),
1222         (int)(available_cc_np / (128*K)));
1223 
1224     for (int i = old_c2_count; i < new_c2_count; i++) {
1225 #if INCLUDE_JVMCI
1226       if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) {
1227         // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their
1228         // existence is completely hidden from the rest of the VM (and those compiler threads can't
1229         // call Java code to do the creation anyway).
1230         //
1231         // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we
1232         // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads.  For
1233         // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary
1234         // coupling with Java.
1235         if (!THREAD->can_call_java()) break;
1236         char name_buffer[256];
1237         os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i);
1238         Handle thread_oop;
1239         {
1240           // We have to give up the lock temporarily for the Java calls.
1241           MutexUnlocker mu(CompileThread_lock);
1242           thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD);
1243         }
1244         if (HAS_PENDING_EXCEPTION) {
1245           if (trace_compiler_threads()) {
1246             ResourceMark rm;
1247             stringStream msg;
1248             msg.print_cr("JVMCI compiler thread creation failed:");
1249             PENDING_EXCEPTION->print_on(&msg);
1250             print_compiler_threads(msg);
1251           }
1252           CLEAR_PENDING_EXCEPTION;
1253           break;
1254         }
1255         // Check if another thread has beaten us during the Java calls.
1256         if (get_c2_thread_count() != i) break;
1257         jobject thread_handle = JNIHandles::make_global(thread_oop);
1258         assert(compiler2_object(i) == nullptr, "Old one must be released!");
1259         _compiler2_objects[i] = thread_handle;
1260       }
1261 #endif
1262       guarantee(compiler2_object(i) != nullptr, "Thread oop must exist");
1263       JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD);
1264       if (ct == nullptr) break;
1265       _compilers[1]->set_num_compiler_threads(i + 1);
1266       if (trace_compiler_threads()) {
1267         ResourceMark rm;
1268         ThreadsListHandle tlh;  // name() depends on the TLH.
1269         assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
1270         stringStream msg;
1271         msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)",
1272                   ct->name(), (int)(free_memory/M), (int)(available_cc_np/M));
1273         print_compiler_threads(msg);
1274       }
1275     }
1276   }
1277 
1278   if (_c1_compile_queue != nullptr) {
1279     old_c1_count = get_c1_thread_count();
1280     new_c1_count = MIN4(_c1_count,
1281         _c1_compile_queue->size() / c1_tasks_per_thread,
1282         (int)(free_memory / (100*M)),
1283         (int)(available_cc_p / (128*K)));
1284 
1285     for (int i = old_c1_count; i < new_c1_count; i++) {
1286       JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD);
1287       if (ct == nullptr) break;
1288       _compilers[0]->set_num_compiler_threads(i + 1);
1289       if (trace_compiler_threads()) {
1290         ResourceMark rm;
1291         ThreadsListHandle tlh;  // name() depends on the TLH.
1292         assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
1293         stringStream msg;
1294         msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)",
1295                   ct->name(), (int)(free_memory/M), (int)(available_cc_p/M));
1296         print_compiler_threads(msg);
1297       }
1298     }
1299   }
1300 
1301   CompileThread_lock->unlock();
1302 }
1303 
1304 
1305 /**
1306  * Set the methods on the stack as on_stack so that redefine classes doesn't
1307  * reclaim them. This method is executed at a safepoint.
1308  */
1309 void CompileBroker::mark_on_stack() {
1310   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
1311   // Since we are at a safepoint, we do not need a lock to access
1312   // the compile queues.
1313   if (_c2_compile_queue != nullptr) {
1314     _c2_compile_queue->mark_on_stack();
1315   }
1316   if (_c1_compile_queue != nullptr) {
1317     _c1_compile_queue->mark_on_stack();
1318   }
1319   if (_ac1_compile_queue != nullptr) {
1320     _ac1_compile_queue->mark_on_stack();
1321   }
1322   if (_ac2_compile_queue != nullptr) {
1323     _ac2_compile_queue->mark_on_stack();
1324   }
1325 }
1326 
1327 // ------------------------------------------------------------------
1328 // CompileBroker::compile_method
1329 //
1330 // Request compilation of a method.
1331 void CompileBroker::compile_method_base(const methodHandle& method,
1332                                         int osr_bci,
1333                                         int comp_level,
1334                                         int hot_count,
1335                                         CompileTask::CompileReason compile_reason,
1336                                         bool requires_online_compilation,
1337                                         bool blocking,
1338                                         Thread* thread) {
1339   guarantee(!method->is_abstract(), "cannot compile abstract methods");
1340   assert(method->method_holder()->is_instance_klass(),
1341          "sanity check");
1342   assert(!method->method_holder()->is_not_initialized()   ||
1343          compile_reason == CompileTask::Reason_Preload    ||
1344          compile_reason == CompileTask::Reason_Precompile ||
1345          compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
1346   assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");
1347 
1348   if (CIPrintRequests) {
1349     tty->print("request: ");
1350     method->print_short_name(tty);
1351     if (osr_bci != InvocationEntryBci) {
1352       tty->print(" osr_bci: %d", osr_bci);
1353     }
1354     tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count);
1355     if (hot_count > 0) {
1356       tty->print(" hot: yes");
1357     }
1358     tty->cr();
1359   }
1360 
1361   // A request has been made for compilation.  Before we do any
1362   // real work, check to see if the method has been compiled
1363   // in the meantime with a definitive result.
1364   if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
1365     return;
1366   }
1367 
1368 #ifndef PRODUCT
1369   if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) {
1370     if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) {
1371       // Positive OSROnlyBCI means only compile that bci.  Negative means don't compile that BCI.
1372       return;
1373     }
1374   }
1375 #endif
1376 
1377   // If this method is already in the compile queue, then
1378   // we do not block the current thread.
1379   if (compilation_is_in_queue(method)) {
1380     // We may want to decay our counter a bit here to prevent
1381     // multiple denied requests for compilation.  This is an
1382     // open compilation policy issue. Note: The other possibility,
1383     // in the case that this is a blocking compile request, is to have
1384     // all subsequent blocking requesters wait for completion of
1385     // ongoing compiles. Note that in this case we'll need a protocol
1386     // for freeing the associated compile tasks. [Or we could have
1387     // a single static monitor on which all these waiters sleep.]
1388     return;
1389   }
1390 
1391   // Tiered policy requires MethodCounters to exist before adding a method to
1392   // the queue. Create if we don't have them yet.
1393   if (compile_reason != CompileTask::Reason_Preload) {
1394     method->get_method_counters(thread);
1395   }
1396 
1397   AOTCodeEntry* aot_code_entry = find_aot_code_entry(method, osr_bci, comp_level, compile_reason, requires_online_compilation);
1398   bool is_aot = (aot_code_entry != nullptr);
1399 
1400   // Outputs from the following MutexLocker block:
1401   CompileTask* task = nullptr;
1402   CompileQueue* queue = compile_queue(comp_level, is_aot);
1403 
1404   // Acquire our lock.
1405   {
1406     ConditionalMutexLocker locker(thread, queue->lock(), !UseLockFreeCompileQueues);
1407 
1408     // Make sure the method has not slipped into the queues since
1409     // last we checked; note that those checks were "fast bail-outs".
1410     // Here we need to be more careful, see 14012000 below.
1411     if (compilation_is_in_queue(method)) {
1412       return;
1413     }
1414 
1415     // We need to check again to see if the compilation has
1416     // completed.  A previous compilation may have registered
1417     // some result.
1418     if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
1419       return;
1420     }
1421 
1422     // We now know that this compilation is not pending, complete,
1423     // or prohibited.  Assign a compile_id to this compilation
1424     // and check to see if it is in our [Start..Stop) range.
1425     int compile_id = assign_compile_id(method, osr_bci);
1426     if (compile_id == 0) {
1427       // The compilation falls outside the allowed range.
1428       return;
1429     }
1430 
1431 #if INCLUDE_JVMCI
1432     if (UseJVMCICompiler && blocking) {
1433       // Don't allow blocking compiles for requests triggered by JVMCI.
1434       if (thread->is_Compiler_thread()) {
1435         blocking = false;
1436       }
1437 
1438       // In libjvmci, JVMCI initialization should not deadlock with other threads
1439       if (!UseJVMCINativeLibrary) {
1440         // Don't allow blocking compiles if inside a class initializer or while performing class loading
1441         vframeStream vfst(JavaThread::cast(thread));
1442         for (; !vfst.at_end(); vfst.next()) {
1443           if (vfst.method()->is_static_initializer() ||
1444               (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) &&
1445                   vfst.method()->name() == vmSymbols::loadClass_name())) {
1446             blocking = false;
1447             break;
1448           }
1449         }
1450 
1451         // Don't allow blocking compilation requests to JVMCI
1452         // if JVMCI itself is not yet initialized
1453         if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
1454           blocking = false;
1455         }
1456       }
1457 
1458       // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
1459       // to avoid deadlock between compiler thread(s) and threads run at shutdown
1460       // such as the DestroyJavaVM thread.
1461       if (JVMCI::in_shutdown()) {
1462         blocking = false;
1463       }
1464     }
1465 #endif // INCLUDE_JVMCI
1466 
1467     // We will enter the compilation in the queue.
1468     // 14012000: Note that this sets the queued_for_compile bits in
1469     // the target method. We can now reason that a method cannot be
1470     // queued for compilation more than once, as follows:
1471     // Before a thread queues a task for compilation, it first acquires
1472     // the compile queue lock, then checks if the method's queued bits
1473     // are set or it has already been compiled. Thus there can not be two
1474     // instances of a compilation task for the same method on the
1475     // compilation queue. Consider now the case where the compilation
1476     // thread has already removed a task for that method from the queue
1477     // and is in the midst of compiling it. In this case, the
1478     // queued_for_compile bits must be set in the method (and these
1479     // will be visible to the current thread, since the bits were set
1480     // under protection of the compile queue lock, which we hold now.
1481     // When the compilation completes, the compiler thread first sets
1482     // the compilation result and then clears the queued_for_compile
1483     // bits. Neither of these actions are protected by a barrier (or done
1484     // under the protection of a lock), so the only guarantee we have
1485     // (on machines with TSO (Total Store Order)) is that these values
1486     // will update in that order. As a result, the only combinations of
1487     // these bits that the current thread will see are, in temporal order:
1488     // <RESULT, QUEUE> :
1489     //     <0, 1> : in compile queue, but not yet compiled
1490     //     <1, 1> : compiled but queue bit not cleared
1491     //     <1, 0> : compiled and queue bit cleared
1492     // Because we first check the queue bits then check the result bits,
1493     // we are assured that we cannot introduce a duplicate task.
1494     // Note that if we did the tests in the reverse order (i.e. check
1495     // result then check queued bit), we could get the result bit before
1496     // the compilation completed, and the queue bit after the compilation
1497     // completed, and end up introducing a "duplicate" (redundant) task.
1498     // In that case, the compiler thread should first check if a method
1499     // has already been compiled before trying to compile it.
1500     // NOTE: in the event that there are multiple compiler threads and
1501     // there is de-optimization/recompilation, things will get hairy,
1502     // and in that case it's best to protect both the testing (here) of
1503     // these bits, and their updating (here and elsewhere) under a
1504     // common lock.
1505     task = create_compile_task(queue,
1506                                compile_id, method,
1507                                osr_bci, comp_level,
1508                                hot_count, aot_code_entry, compile_reason,
1509                                requires_online_compilation, blocking);
1510 
1511     if (task->is_aot_load() && (_ac_count > 0)) {
1512       // Put it on AOT code caching queue
1513       queue = is_c1_compile(comp_level) ? _ac1_compile_queue : _ac2_compile_queue;
1514     }
1515 
1516     if (UseLockFreeCompileQueues) {
1517       assert(queue->lock()->owned_by_self() == false, "");
1518       queue->add_pending(task);
1519     } else {
1520       queue->add(task);
1521     }
1522   }
1523 
1524   if (blocking) {
1525     wait_for_completion(task);
1526   }
1527 }
1528 
1529 AOTCodeEntry* CompileBroker::find_aot_code_entry(const methodHandle& method, int osr_bci, int comp_level,
1530                                         CompileTask::CompileReason compile_reason,
1531                                         bool requires_online_compilation) {
1532   if (requires_online_compilation || compile_reason == CompileTask::Reason_Whitebox) {
1533     return nullptr; // Need normal JIT compilation
1534   }
1535   AOTCodeEntry* aot_code_entry = nullptr;
1536   if (osr_bci == InvocationEntryBci && AOTCodeCache::is_using_code()) {
1537     // Check for AOT preload code first.
1538     if (compile_reason == CompileTask::Reason_Preload) {
1539       aot_code_entry = method->aot_code_entry();
1540       assert(aot_code_entry != nullptr && aot_code_entry->for_preload(), "sanity");
1541     } else {
1542       aot_code_entry = AOTCodeCache::find_code_entry(method, comp_level);
1543     }
1544   }
1545   return aot_code_entry;
1546 }
1547 
1548 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
1549                                        int comp_level,
1550                                        int hot_count,
1551                                        bool requires_online_compilation,
1552                                        CompileTask::CompileReason compile_reason,
1553                                        TRAPS) {
1554   // Do nothing if compilebroker is not initialized or compiles are submitted on level none
1555   if (!_initialized || comp_level == CompLevel_none) {
1556     return nullptr;
1557   }
1558 
1559 #if INCLUDE_JVMCI
1560   if (EnableJVMCI && UseJVMCICompiler &&
1561       comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
1562     return nullptr;
1563   }
1564 #endif
1565 
1566   AbstractCompiler *comp = CompileBroker::compiler(comp_level);
1567   assert(comp != nullptr, "Ensure we have a compiler");
1568 
1569 #if INCLUDE_JVMCI
1570   if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) {
1571     // JVMCI compilation is not yet initializable.
1572     return nullptr;
1573   }
1574 #endif
1575 
1576   DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
1577   // CompileBroker::compile_method can trap and can have pending async exception.
1578   nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_count, requires_online_compilation, compile_reason, directive, THREAD);
1579   DirectivesStack::release(directive);
1580   return nm;
1581 }
1582 
1583 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
1584                                          int comp_level,
1585                                          int hot_count,
1586                                          bool requires_online_compilation,
1587                                          CompileTask::CompileReason compile_reason,
1588                                          DirectiveSet* directive,
1589                                          TRAPS) {
1590 
1591   // make sure arguments make sense
1592   assert(method->method_holder()->is_instance_klass(), "not an instance method");
1593   assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
1594   assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
1595   assert(!method->method_holder()->is_not_initialized()   ||
1596          compile_reason == CompileTask::Reason_Preload    ||
1597          compile_reason == CompileTask::Reason_Precompile ||
1598          compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
1599   // return quickly if possible
1600   bool aot_compilation = (PrecompileCode && PrecompileOnlyAndExit) ||
1601                          CDSConfig::is_dumping_aot_code();
1602   if (aot_compilation && !CompileTask::reason_is_precompile(compile_reason)) {
1603     // Skip normal compilations when compiling AOT code
1604     return nullptr;
1605   }
1606 
1607   // lock, make sure that the compilation
1608   // isn't prohibited in a straightforward way.
1609   AbstractCompiler* comp = CompileBroker::compiler(comp_level);
1610   if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
1611     return nullptr;
1612   }
1613 
1614   if (osr_bci == InvocationEntryBci) {
1615     // standard compilation
1616     nmethod* method_code = method->code();
1617     if (method_code != nullptr) {
1618       if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
1619         return method_code;
1620       }
1621     }
1622     if (method->is_not_compilable(comp_level)) {
1623       return nullptr;
1624     }
1625   } else {
1626     // osr compilation
1627     // We accept a higher level osr method
1628     nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
1629     if (nm != nullptr) return nm;
1630     if (method->is_not_osr_compilable(comp_level)) return nullptr;
1631   }
1632 
1633   assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
1634   // some prerequisites that are compiler specific
1635   if (compile_reason != CompileTask::Reason_Preload &&
1636       !CompileTask::reason_is_precompile(compile_reason) &&
1637      (comp->is_c2() || comp->is_jvmci())) {
1638     InternalOOMEMark iom(THREAD);
1639     method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL);
1640     // Resolve all classes seen in the signature of the method
1641     // we are compiling.
1642     Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL);
1643   }
1644 
1645   // If the method is native, do the lookup in the thread requesting
1646   // the compilation. Native lookups can load code, which is not
1647   // permitted during compilation.
1648   //
1649   // Note: A native method implies non-osr compilation which is
1650   //       checked with an assertion at the entry of this method.
1651   if (method->is_native() && !method->is_method_handle_intrinsic()) {
1652     address adr = NativeLookup::lookup(method, THREAD);
1653     if (HAS_PENDING_EXCEPTION) {
1654       // In case of an exception looking up the method, we just forget
1655       // about it. The interpreter will kick-in and throw the exception.
1656       method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable()
1657       CLEAR_PENDING_EXCEPTION;
1658       return nullptr;
1659     }
1660     assert(method->has_native_function(), "must have native code by now");
1661   }
1662 
1663   // RedefineClasses() has replaced this method; just return
1664   if (method->is_old()) {
1665     return nullptr;
1666   }
1667 
1668   // JVMTI -- post_compile_event requires jmethod_id() that may require
1669   // a lock the compiling thread can not acquire. Prefetch it here.
1670   if (JvmtiExport::should_post_compiled_method_load()) {
1671     method->jmethod_id();
1672   }
1673 
1674   // do the compilation
1675   if (method->is_native()) {
1676     if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
1677       // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
1678       // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
1679       //
1680       // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
1681       // in this case.  If we can't generate one and use it we can not execute the out-of-line method handle calls.
1682       AdapterHandlerLibrary::create_native_wrapper(method);
1683     } else {
1684       return nullptr;
1685     }
1686   } else {
1687     // If the compiler is shut off due to code cache getting full
1688     // fail out now so blocking compiles dont hang the java thread
1689     if (!should_compile_new_jobs()) {
1690       return nullptr;
1691     }
1692     bool is_blocking = ReplayCompiles                                             ||
1693                        !directive->BackgroundCompilationOption                    ||
1694                        (PreloadBlocking && (compile_reason == CompileTask::Reason_Preload));
1695     compile_method_base(method, osr_bci, comp_level, hot_count, compile_reason, requires_online_compilation, is_blocking, THREAD);
1696   }
1697 
1698   // return requested nmethod
1699   // We accept a higher level osr method
1700   if (osr_bci == InvocationEntryBci) {
1701     return method->code();
1702   }
1703   return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
1704 }
1705 
1706 
1707 // ------------------------------------------------------------------
1708 // CompileBroker::compilation_is_complete
1709 //
1710 // See if compilation of this method is already complete.
1711 bool CompileBroker::compilation_is_complete(Method*                    method,
1712                                             int                        osr_bci,
1713                                             int                        comp_level,
1714                                             bool                       online_only,
1715                                             CompileTask::CompileReason compile_reason) {
1716   if (compile_reason == CompileTask::Reason_Precompile ||
1717       compile_reason == CompileTask::Reason_PrecompileForPreload) {
1718     return false; // FIXME: any restrictions?
1719   }
1720   bool is_osr = (osr_bci != standard_entry_bci);
1721   if (is_osr) {
1722     if (method->is_not_osr_compilable(comp_level)) {
1723       return true;
1724     } else {
1725       nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
1726       return (result != nullptr);
1727     }
1728   } else {
1729     if (method->is_not_compilable(comp_level)) {
1730       return true;
1731     } else {
1732       nmethod* result = method->code();
1733       if (result == nullptr) {
1734         return false;
1735       }
1736       if (online_only && result->is_aot()) {
1737         return false;
1738       }
1739       bool same_level = (comp_level == result->comp_level());
1740       if (result->has_clinit_barriers()) {
1741         return !same_level; // Allow replace preloaded code with new code of the same level
1742       }
1743       return same_level;
1744     }
1745   }
1746 }
1747 
1748 
1749 /**
1750  * See if this compilation is already requested.
1751  *
1752  * Implementation note: there is only a single "is in queue" bit
1753  * for each method.  This means that the check below is overly
1754  * conservative in the sense that an osr compilation in the queue
1755  * will block a normal compilation from entering the queue (and vice
1756  * versa).  This can be remedied by a full queue search to disambiguate
1757  * cases.  If it is deemed profitable, this may be done.
1758  */
1759 bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
1760   return method->queued_for_compilation();
1761 }
1762 
1763 // ------------------------------------------------------------------
1764 // CompileBroker::compilation_is_prohibited
1765 //
1766 // See if this compilation is not allowed.
1767 bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
1768   bool is_native = method->is_native();
1769   // Some compilers may not support the compilation of natives.
1770   AbstractCompiler *comp = compiler(comp_level);
1771   if (is_native && (!CICompileNatives || comp == nullptr)) {
1772     method->set_not_compilable_quietly("native methods not supported", comp_level);
1773     return true;
1774   }
1775 
1776   bool is_osr = (osr_bci != standard_entry_bci);
1777   // Some compilers may not support on stack replacement.
1778   if (is_osr && (!CICompileOSR || comp == nullptr)) {
1779     method->set_not_osr_compilable("OSR not supported", comp_level);
1780     return true;
1781   }
1782 
1783   // The method may be explicitly excluded by the user.
1784   double scale;
1785   if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) {
1786     bool quietly = CompilerOracle::be_quiet();
1787     if (PrintCompilation && !quietly) {
1788       // This does not happen quietly...
1789       ResourceMark rm;
1790       tty->print("### Excluding %s:%s",
1791                  method->is_native() ? "generation of native wrapper" : "compile",
1792                  (method->is_static() ? " static" : ""));
1793       method->print_short_name(tty);
1794       tty->cr();
1795     }
1796     method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly);
1797   }
1798 
1799   return false;
1800 }
1801 
1802 /**
1803  * Generate serialized IDs for compilation requests. If certain debugging flags are used
1804  * and the ID is not within the specified range, the method is not compiled and 0 is returned.
1805  * The function also allows to generate separate compilation IDs for OSR compilations.
1806  */
1807 int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
1808 #ifdef ASSERT
1809   bool is_osr = (osr_bci != standard_entry_bci);
1810   int id;
1811   if (method->is_native()) {
1812     assert(!is_osr, "can't be osr");
1813     // Adapters, native wrappers and method handle intrinsics
1814     // should be generated always.
1815     return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1);
1816   } else if (CICountOSR && is_osr) {
1817     id = Atomic::add(&_osr_compilation_id, 1);
1818     if (CIStartOSR <= id && id < CIStopOSR) {
1819       return id;
1820     }
1821   } else {
1822     id = Atomic::add(&_compilation_id, 1);
1823     if (CIStart <= id && id < CIStop) {
1824       return id;
1825     }
1826   }
1827 
1828   // Method was not in the appropriate compilation range.
1829   method->set_not_compilable_quietly("Not in requested compile id range");
1830   return 0;
1831 #else
1832   // CICountOSR is a develop flag and set to 'false' by default. In a product built,
1833   // only _compilation_id is incremented.
1834   return Atomic::add(&_compilation_id, 1);
1835 #endif
1836 }
1837 
1838 // ------------------------------------------------------------------
1839 // CompileBroker::assign_compile_id_unlocked
1840 //
1841 // Public wrapper for assign_compile_id that acquires the needed locks
1842 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) {
1843   return assign_compile_id(method, osr_bci);
1844 }
1845 
1846 // ------------------------------------------------------------------
1847 // CompileBroker::create_compile_task
1848 //
1849 // Create a CompileTask object representing the current request for
1850 // compilation.  Add this task to the queue.
1851 CompileTask* CompileBroker::create_compile_task(CompileQueue*       queue,
1852                                                 int                 compile_id,
1853                                                 const methodHandle& method,
1854                                                 int                 osr_bci,
1855                                                 int                 comp_level,
1856                                                 int                 hot_count,
1857                                                 AOTCodeEntry*       aot_code_entry,
1858                                                 CompileTask::CompileReason compile_reason,
1859                                                 bool                requires_online_compilation,
1860                                                 bool                blocking) {
1861   CompileTask* new_task = new CompileTask(compile_id, method, osr_bci, comp_level,
1862                        hot_count, aot_code_entry, compile_reason, queue,
1863                        requires_online_compilation, blocking);
1864   return new_task;
1865 }
1866 
1867 #if INCLUDE_JVMCI
1868 // The number of milliseconds to wait before checking if
1869 // JVMCI compilation has made progress.
1870 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000;
1871 
1872 // The number of JVMCI compilation progress checks that must fail
1873 // before unblocking a thread waiting for a blocking compilation.
1874 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10;
1875 
1876 /**
1877  * Waits for a JVMCI compiler to complete a given task. This thread
1878  * waits until either the task completes or it sees no JVMCI compilation
1879  * progress for N consecutive milliseconds where N is
1880  * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE *
1881  * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS.
1882  *
1883  * @return true if this thread needs to delete the task
1884  */
1885 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) {
1886   assert(UseJVMCICompiler, "sanity");
1887   MonitorLocker ml(thread, CompileTaskWait_lock);
1888   int progress_wait_attempts = 0;
1889   jint thread_jvmci_compilation_ticks = 0;
1890   jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks();
1891   while (!task->is_complete() && !is_compilation_disabled_forever() &&
1892          ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) {
1893     JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state();
1894 
1895     bool progress;
1896     if (jvmci_compile_state != nullptr) {
1897       jint ticks = jvmci_compile_state->compilation_ticks();
1898       progress = (ticks - thread_jvmci_compilation_ticks) != 0;
1899       JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks);
1900       thread_jvmci_compilation_ticks = ticks;
1901     } else {
1902       // Still waiting on JVMCI compiler queue. This thread may be holding a lock
1903       // that all JVMCI compiler threads are blocked on. We use the global JVMCI
1904       // compilation ticks to determine whether JVMCI compilation
1905       // is still making progress through the JVMCI compiler queue.
1906       jint ticks = jvmci->global_compilation_ticks();
1907       progress = (ticks - global_jvmci_compilation_ticks) != 0;
1908       JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks);
1909       global_jvmci_compilation_ticks = ticks;
1910     }
1911 
1912     if (!progress) {
1913       if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) {
1914         if (PrintCompilation) {
1915           task->print(tty, "wait for blocking compilation timed out");
1916         }
1917         JVMCI_event_1("waiting on compilation %d timed out", task->compile_id());
1918         break;
1919       }
1920     } else {
1921       progress_wait_attempts = 0;
1922     }
1923   }
1924   task->clear_waiter();
1925   return task->is_complete();
1926 }
1927 #endif
1928 
1929 /**
1930  *  Wait for the compilation task to complete.
1931  */
1932 void CompileBroker::wait_for_completion(CompileTask* task) {
1933   if (CIPrintCompileQueue) {
1934     ttyLocker ttyl;
1935     tty->print_cr("BLOCKING FOR COMPILE");
1936   }
1937 
1938   assert(task->is_blocking(), "can only wait on blocking task");
1939 
1940   JavaThread* thread = JavaThread::current();
1941 
1942   methodHandle method(thread, task->method());
1943   bool free_task;
1944 #if INCLUDE_JVMCI
1945   AbstractCompiler* comp = compiler(task->comp_level());
1946   if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) {
1947     // It may return before compilation is completed.
1948     // Note that libjvmci should not pre-emptively unblock
1949     // a thread waiting for a compilation as it does not call
1950     // Java code and so is not deadlock prone like jarjvmci.
1951     free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread);
1952   } else
1953 #endif
1954   {
1955     free_task = true;
1956     // Wait until the task is complete or compilation is shut down.
1957     MonitorLocker ml(thread, CompileTaskWait_lock);
1958     while (!task->is_complete() && !is_compilation_disabled_forever()) {
1959       ml.wait();
1960     }
1961   }
1962 
1963   // It is harmless to check this status without the lock, because
1964   // completion is a stable property.
1965   if (!task->is_complete()) {
1966     // Task is not complete, likely because we are exiting for compilation
1967     // shutdown. The task can still be reached through the queue, or executed
1968     // by some compiler thread. There is no coordination with either MCQ lock
1969     // holders or compilers, therefore we cannot delete the task.
1970     //
1971     // This will leave task allocated, which leaks it. At this (degraded) point,
1972     // it is less risky to abandon the task, rather than attempting a more
1973     // complicated deletion protocol.
1974     free_task = false;
1975   }
1976 
1977   if (free_task) {
1978     assert(task->is_complete(), "Compilation should have completed");
1979     assert(task->next() == nullptr && task->prev() == nullptr,
1980            "Completed task should not be in the queue");
1981 
1982     // By convention, the waiter is responsible for deleting a
1983     // blocking CompileTask. Since there is only one waiter ever
1984     // waiting on a CompileTask, we know that no one else will
1985     // be using this CompileTask; we can delete it.
1986     delete task;
1987   }
1988 }
1989 
1990 void CompileBroker::wait_for_no_active_tasks() {
1991   CompileTask::wait_for_no_active_tasks();
1992 }
1993 
1994 /**
1995  * Initialize compiler thread(s) + compiler object(s). The postcondition
1996  * of this function is that the compiler runtimes are initialized and that
1997  * compiler threads can start compiling.
1998  */
1999 bool CompileBroker::init_compiler_runtime() {
2000   CompilerThread* thread = CompilerThread::current();
2001   AbstractCompiler* comp = thread->compiler();
2002   // Final sanity check - the compiler object must exist
2003   guarantee(comp != nullptr, "Compiler object must exist");
2004 
2005   {
2006     // Must switch to native to allocate ci_env
2007     ThreadToNativeFromVM ttn(thread);
2008     ciEnv ci_env((CompileTask*)nullptr);
2009     // Cache Jvmti state
2010     ci_env.cache_jvmti_state();
2011     // Cache DTrace flags
2012     ci_env.cache_dtrace_flags();
2013 
2014     // Switch back to VM state to do compiler initialization
2015     ThreadInVMfromNative tv(thread);
2016 
2017     comp->initialize();
2018   }
2019 
2020   if (comp->is_failed()) {
2021     disable_compilation_forever();
2022     // If compiler initialization failed, no compiler thread that is specific to a
2023     // particular compiler runtime will ever start to compile methods.
2024     shutdown_compiler_runtime(comp, thread);
2025     return false;
2026   }
2027 
2028   // C1 specific check
2029   if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) {
2030     warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
2031     return false;
2032   }
2033 
2034   return true;
2035 }
2036 
2037 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) {
2038   BufferBlob* blob = thread->get_buffer_blob();
2039   if (blob != nullptr) {
2040     blob->purge();
2041     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2042     CodeCache::free(blob);
2043   }
2044 }
2045 
2046 /**
2047  * If C1 and/or C2 initialization failed, we shut down all compilation.
2048  * We do this to keep things simple. This can be changed if it ever turns
2049  * out to be a problem.
2050  */
2051 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
2052   free_buffer_blob_if_allocated(thread);
2053 
2054   log_info(compilation)("shutdown_compiler_runtime: " INTPTR_FORMAT, p2i(thread));
2055 
2056   if (comp->should_perform_shutdown()) {
2057     // There are two reasons for shutting down the compiler
2058     // 1) compiler runtime initialization failed
2059     // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
2060     warning("%s initialization failed. Shutting down all compilers", comp->name());
2061 
2062     // Only one thread per compiler runtime object enters here
2063     // Set state to shut down
2064     comp->set_shut_down();
2065 
2066     // Delete all queued compilation tasks to make compiler threads exit faster.
2067     if (_c1_compile_queue != nullptr) {
2068       _c1_compile_queue->delete_all();
2069     }
2070 
2071     if (_c2_compile_queue != nullptr) {
2072       _c2_compile_queue->delete_all();
2073     }
2074 
2075     // Set flags so that we continue execution with using interpreter only.
2076     UseCompiler    = false;
2077     UseInterpreter = true;
2078 
2079     // We could delete compiler runtimes also. However, there are references to
2080     // the compiler runtime(s) (e.g.,  nmethod::is_compiled_by_c1()) which then
2081     // fail. This can be done later if necessary.
2082   }
2083 }
2084 
2085 /**
2086  * Helper function to create new or reuse old CompileLog.
2087  */
2088 CompileLog* CompileBroker::get_log(CompilerThread* ct) {
2089   if (!LogCompilation) return nullptr;
2090 
2091   AbstractCompiler *compiler = ct->compiler();
2092   bool c1 = compiler->is_c1();
2093   jobject* compiler_objects = c1 ? _compiler1_objects : _compiler2_objects;
2094   assert(compiler_objects != nullptr, "must be initialized at this point");
2095   CompileLog** logs = c1 ? _compiler1_logs : _compiler2_logs;
2096   assert(logs != nullptr, "must be initialized at this point");
2097   int count = c1 ? _c1_count : _c2_count;
2098 
2099   if (ct->queue() == _ac1_compile_queue || ct->queue() == _ac2_compile_queue) {
2100     compiler_objects = _ac_objects;
2101     logs  = _ac_logs;
2102     count = _ac_count;
2103   }
2104   // Find Compiler number by its threadObj.
2105   oop compiler_obj = ct->threadObj();
2106   int compiler_number = 0;
2107   bool found = false;
2108   for (; compiler_number < count; compiler_number++) {
2109     if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) {
2110       found = true;
2111       break;
2112     }
2113   }
2114   assert(found, "Compiler must exist at this point");
2115 
2116   // Determine pointer for this thread's log.
2117   CompileLog** log_ptr = &logs[compiler_number];
2118 
2119   // Return old one if it exists.
2120   CompileLog* log = *log_ptr;
2121   if (log != nullptr) {
2122     ct->init_log(log);
2123     return log;
2124   }
2125 
2126   // Create a new one and remember it.
2127   init_compiler_thread_log();
2128   log = ct->log();
2129   *log_ptr = log;
2130   return log;
2131 }
2132 
2133 // ------------------------------------------------------------------
2134 // CompileBroker::compiler_thread_loop
2135 //
2136 // The main loop run by a CompilerThread.
2137 void CompileBroker::compiler_thread_loop() {
2138   CompilerThread* thread = CompilerThread::current();
2139   CompileQueue* queue = thread->queue();
2140   // For the thread that initializes the ciObjectFactory
2141   // this resource mark holds all the shared objects
2142   ResourceMark rm;
2143 
2144   // First thread to get here will initialize the compiler interface
2145 
2146   {
2147     ASSERT_IN_VM;
2148     MutexLocker only_one (thread, CompileThread_lock);
2149     if (!ciObjectFactory::is_initialized()) {
2150       ciObjectFactory::initialize();
2151     }
2152   }
2153 
2154   // Open a log.
2155   CompileLog* log = get_log(thread);
2156   if (log != nullptr) {
2157     log->begin_elem("start_compile_thread name='%s' thread='%zu' process='%d'",
2158                     thread->name(),
2159                     os::current_thread_id(),
2160                     os::current_process_id());
2161     log->stamp();
2162     log->end_elem();
2163   }
2164 
2165   if (!thread->init_compilation_timeout()) {
2166     return;
2167   }
2168 
2169   // If compiler thread/runtime initialization fails, exit the compiler thread
2170   if (!init_compiler_runtime()) {
2171     return;
2172   }
2173 
2174   thread->start_idle_timer();
2175 
2176   // Poll for new compilation tasks as long as the JVM runs. Compilation
2177   // should only be disabled if something went wrong while initializing the
2178   // compiler runtimes. This, in turn, should not happen. The only known case
2179   // when compiler runtime initialization fails is if there is not enough free
2180   // space in the code cache to generate the necessary stubs, etc.
2181   while (!is_compilation_disabled_forever()) {
2182     // We need this HandleMark to avoid leaking VM handles.
2183     HandleMark hm(thread);
2184 
2185     RecompilationPolicy::recompilation_step(AOTRecompilationWorkUnitSize, thread);
2186 
2187     CompileTask* task = queue->get(thread);
2188     if (task == nullptr) {
2189       if (UseDynamicNumberOfCompilerThreads) {
2190         // Access compiler_count under lock to enforce consistency.
2191         MutexLocker only_one(CompileThread_lock);
2192         if (can_remove(thread, true)) {
2193           if (trace_compiler_threads()) {
2194             ResourceMark rm;
2195             stringStream msg;
2196             msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time",
2197                       thread->name(), thread->idle_time_millis());
2198             print_compiler_threads(msg);
2199           }
2200 
2201           // Notify compiler that the compiler thread is about to stop
2202           thread->compiler()->stopping_compiler_thread(thread);
2203 
2204           free_buffer_blob_if_allocated(thread);
2205           return; // Stop this thread.
2206         }
2207       }
2208     } else {
2209       // Assign the task to the current thread.  Mark this compilation
2210       // thread as active for the profiler.
2211       // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition
2212       // occurs after fetching the compile task off the queue.
2213       CompileTaskWrapper ctw(task);
2214       methodHandle method(thread, task->method());
2215 
2216       // Never compile a method if breakpoints are present in it
2217       if (method()->number_of_breakpoints() == 0) {
2218         // Compile the method.
2219         if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
2220           invoke_compiler_on_method(task);
2221           thread->start_idle_timer();
2222         } else {
2223           // After compilation is disabled, remove remaining methods from queue
2224           method->clear_queued_for_compilation();
2225           method->set_pending_queue_processed(false);
2226           task->set_failure_reason("compilation is disabled");
2227         }
2228       } else {
2229         task->set_failure_reason("breakpoints are present");
2230       }
2231 
2232       // Don't use AOT compielr threads for dynamic C1 and C2 threads creation.
2233       if (UseDynamicNumberOfCompilerThreads &&
2234           (queue == _c1_compile_queue || queue == _c2_compile_queue)) {
2235         possibly_add_compiler_threads(thread);
2236         assert(!thread->has_pending_exception(), "should have been handled");
2237       }
2238     }
2239   }
2240 
2241   // Shut down compiler runtime
2242   shutdown_compiler_runtime(thread->compiler(), thread);
2243 }
2244 
2245 // ------------------------------------------------------------------
2246 // CompileBroker::init_compiler_thread_log
2247 //
2248 // Set up state required by +LogCompilation.
2249 void CompileBroker::init_compiler_thread_log() {
2250     CompilerThread* thread = CompilerThread::current();
2251     char  file_name[4*K];
2252     FILE* fp = nullptr;
2253     intx thread_id = os::current_thread_id();
2254     for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
2255       const char* dir = (try_temp_dir ? os::get_temp_directory() : nullptr);
2256       if (dir == nullptr) {
2257         jio_snprintf(file_name, sizeof(file_name), "hs_c%zu_pid%u.log",
2258                      thread_id, os::current_process_id());
2259       } else {
2260         jio_snprintf(file_name, sizeof(file_name),
2261                      "%s%shs_c%zu_pid%u.log", dir,
2262                      os::file_separator(), thread_id, os::current_process_id());
2263       }
2264 
2265       fp = os::fopen(file_name, "wt");
2266       if (fp != nullptr) {
2267         if (LogCompilation && Verbose) {
2268           tty->print_cr("Opening compilation log %s", file_name);
2269         }
2270         CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id);
2271         if (log == nullptr) {
2272           fclose(fp);
2273           return;
2274         }
2275         thread->init_log(log);
2276 
2277         if (xtty != nullptr) {
2278           ttyLocker ttyl;
2279           // Record any per thread log files
2280           xtty->elem("thread_logfile thread='%zd' filename='%s'", thread_id, file_name);
2281         }
2282         return;
2283       }
2284     }
2285     warning("Cannot open log file: %s", file_name);
2286 }
2287 
2288 void CompileBroker::log_metaspace_failure() {
2289   const char* message = "some methods may not be compiled because metaspace "
2290                         "is out of memory";
2291   if (CompilationLog::log() != nullptr) {
2292     CompilationLog::log()->log_metaspace_failure(message);
2293   }
2294   if (PrintCompilation) {
2295     tty->print_cr("COMPILE PROFILING SKIPPED: %s", message);
2296   }
2297 }
2298 
2299 
2300 // ------------------------------------------------------------------
2301 // CompileBroker::set_should_block
2302 //
2303 // Set _should_block.
2304 // Call this from the VM, with Threads_lock held and a safepoint requested.
2305 void CompileBroker::set_should_block() {
2306   assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
2307   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already");
2308 #ifndef PRODUCT
2309   if (PrintCompilation && (Verbose || WizardMode))
2310     tty->print_cr("notifying compiler thread pool to block");
2311 #endif
2312   _should_block = true;
2313 }
2314 
2315 // ------------------------------------------------------------------
2316 // CompileBroker::maybe_block
2317 //
2318 // Call this from the compiler at convenient points, to poll for _should_block.
2319 void CompileBroker::maybe_block() {
2320   if (_should_block) {
2321 #ifndef PRODUCT
2322     if (PrintCompilation && (Verbose || WizardMode))
2323       tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current()));
2324 #endif
2325     // If we are executing a task during the request to block, report the task
2326     // before disappearing.
2327     CompilerThread* thread = CompilerThread::current();
2328     if (thread != nullptr) {
2329       CompileTask* task = thread->task();
2330       if (task != nullptr) {
2331         if (PrintCompilation) {
2332           task->print(tty, "blocked");
2333         }
2334         task->print_ul("blocked");
2335       }
2336     }
2337     // Go to VM state and block for final VM shutdown safepoint.
2338     ThreadInVMfromNative tivfn(JavaThread::current());
2339     assert(false, "Should never unblock from TIVNM entry");
2340   }
2341 }
2342 
2343 // wrapper for CodeCache::print_summary()
2344 static void codecache_print(bool detailed)
2345 {
2346   stringStream s;
2347   // Dump code cache  into a buffer before locking the tty,
2348   {
2349     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2350     CodeCache::print_summary(&s, detailed);
2351   }
2352   ttyLocker ttyl;
2353   tty->print("%s", s.freeze());
2354 }
2355 
2356 // wrapper for CodeCache::print_summary() using outputStream
2357 static void codecache_print(outputStream* out, bool detailed) {
2358   stringStream s;
2359 
2360   // Dump code cache into a buffer
2361   {
2362     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2363     CodeCache::print_summary(&s, detailed);
2364   }
2365 
2366   char* remaining_log = s.as_string();
2367   while (*remaining_log != '\0') {
2368     char* eol = strchr(remaining_log, '\n');
2369     if (eol == nullptr) {
2370       out->print_cr("%s", remaining_log);
2371       remaining_log = remaining_log + strlen(remaining_log);
2372     } else {
2373       *eol = '\0';
2374       out->print_cr("%s", remaining_log);
2375       remaining_log = eol + 1;
2376     }
2377   }
2378 }
2379 
2380 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env,
2381                                          int compilable, const char* failure_reason) {
2382   if (!AbortVMOnCompilationFailure) {
2383     return;
2384   }
2385   if (compilable == ciEnv::MethodCompilable_not_at_tier) {
2386     fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason);
2387   }
2388   if (compilable == ciEnv::MethodCompilable_never) {
2389     fatal("Never compilable: %s", failure_reason);
2390   }
2391 }
2392 
2393 static void post_compilation_event(EventCompilation& event, CompileTask* task) {
2394   assert(task != nullptr, "invariant");
2395   CompilerEvent::CompilationEvent::post(event,
2396                                         task->compile_id(),
2397                                         task->compiler()->type(),
2398                                         task->method(),
2399                                         task->comp_level(),
2400                                         task->is_success(),
2401                                         task->osr_bci() != CompileBroker::standard_entry_bci,
2402                                         task->nm_total_size(),
2403                                         task->num_inlined_bytecodes(),
2404                                         task->arena_bytes());
2405 }
2406 
2407 int DirectivesStack::_depth = 0;
2408 CompilerDirectives* DirectivesStack::_top = nullptr;
2409 CompilerDirectives* DirectivesStack::_bottom = nullptr;
2410 
2411 // Acquires Compilation_lock and waits for it to be notified
2412 // as long as WhiteBox::compilation_locked is true.
2413 static void whitebox_lock_compilation() {
2414   MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
2415   while (WhiteBox::compilation_locked) {
2416     locker.wait();
2417   }
2418 }
2419 
2420 // ------------------------------------------------------------------
2421 // CompileBroker::invoke_compiler_on_method
2422 //
2423 // Compile a method.
2424 //
2425 void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
2426   task->print_ul();
2427   elapsedTimer time;
2428 
2429   DirectiveSet* directive = task->directive();
2430 
2431   CompilerThread* thread = CompilerThread::current();
2432   ResourceMark rm(thread);
2433 
2434   if (CompilationLog::log() != nullptr) {
2435     CompilationLog::log()->log_compile(thread, task);
2436   }
2437 
2438   // Common flags.
2439   int compile_id = task->compile_id();
2440   int osr_bci = task->osr_bci();
2441   bool is_osr = (osr_bci != standard_entry_bci);
2442   bool should_log = (thread->log() != nullptr);
2443   bool should_break = false;
2444   bool should_print_compilation = PrintCompilation || directive->PrintCompilationOption;
2445   const int task_level = task->comp_level();
2446   AbstractCompiler* comp = task->compiler();
2447   {
2448     // create the handle inside it's own block so it can't
2449     // accidentally be referenced once the thread transitions to
2450     // native.  The NoHandleMark before the transition should catch
2451     // any cases where this occurs in the future.
2452     methodHandle method(thread, task->method());
2453 
2454     assert(!method->is_native(), "no longer compile natives");
2455 
2456     // Update compile information when using perfdata.
2457     if (UsePerfData) {
2458       update_compile_perf_data(thread, method, is_osr);
2459     }
2460 
2461     DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
2462   }
2463 
2464   should_break = directive->BreakAtCompileOption || task->check_break_at_flags();
2465   if (should_log && !directive->LogOption) {
2466     should_log = false;
2467   }
2468 
2469   // Allocate a new set of JNI handles.
2470   JNIHandleMark jhm(thread);
2471   Method* target_handle = task->method();
2472   int compilable = ciEnv::MethodCompilable;
2473   const char* failure_reason = nullptr;
2474   bool failure_reason_on_C_heap = false;
2475   const char* retry_message = nullptr;
2476 
2477 #if INCLUDE_JVMCI
2478   if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) {
2479     JVMCICompiler* jvmci = (JVMCICompiler*) comp;
2480 
2481     TraceTime t1("compilation", &time);
2482     EventCompilation event;
2483     JVMCICompileState compile_state(task, jvmci);
2484     JVMCIRuntime *runtime = nullptr;
2485 
2486     if (JVMCI::in_shutdown()) {
2487       failure_reason = "in JVMCI shutdown";
2488       retry_message = "not retryable";
2489       compilable = ciEnv::MethodCompilable_never;
2490     } else if (compile_state.target_method_is_old()) {
2491       // Skip redefined methods
2492       failure_reason = "redefined method";
2493       retry_message = "not retryable";
2494       compilable = ciEnv::MethodCompilable_never;
2495     } else {
2496       JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__);
2497       if (env.init_error() != JNI_OK) {
2498         const char* msg = env.init_error_msg();
2499         failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)",
2500                                     env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI);
2501         bool reason_on_C_heap = true;
2502         // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it
2503         // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime.
2504         bool retryable = env.init_error() == JNI_ENOMEM;
2505         compile_state.set_failure(retryable, failure_reason, reason_on_C_heap);
2506       }
2507       if (failure_reason == nullptr) {
2508         if (WhiteBoxAPI && WhiteBox::compilation_locked) {
2509           // Must switch to native to block
2510           ThreadToNativeFromVM ttn(thread);
2511           whitebox_lock_compilation();
2512         }
2513         methodHandle method(thread, target_handle);
2514         runtime = env.runtime();
2515         runtime->compile_method(&env, jvmci, method, osr_bci);
2516 
2517         failure_reason = compile_state.failure_reason();
2518         failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap();
2519         if (!compile_state.retryable()) {
2520           retry_message = "not retryable";
2521           compilable = ciEnv::MethodCompilable_not_at_tier;
2522         }
2523         if (!task->is_success()) {
2524           assert(failure_reason != nullptr, "must specify failure_reason");
2525         }
2526       }
2527     }
2528     if (!task->is_success() && !JVMCI::in_shutdown()) {
2529       handle_compile_error(thread, task, nullptr, compilable, failure_reason);
2530     }
2531     if (event.should_commit()) {
2532       post_compilation_event(event, task);
2533     }
2534 
2535     if (runtime != nullptr) {
2536       runtime->post_compile(thread);
2537     }
2538   } else
2539 #endif // INCLUDE_JVMCI
2540   {
2541     NoHandleMark  nhm;
2542     ThreadToNativeFromVM ttn(thread);
2543 
2544     ciEnv ci_env(task);
2545     if (should_break) {
2546       ci_env.set_break_at_compile(true);
2547     }
2548     if (should_log) {
2549       ci_env.set_log(thread->log());
2550     }
2551     assert(thread->env() == &ci_env, "set by ci_env");
2552     // The thread-env() field is cleared in ~CompileTaskWrapper.
2553 
2554     // Cache Jvmti state
2555     bool method_is_old = ci_env.cache_jvmti_state();
2556 
2557     // Skip redefined methods
2558     if (method_is_old) {
2559       ci_env.record_method_not_compilable("redefined method", true);
2560     }
2561 
2562     // Cache DTrace flags
2563     ci_env.cache_dtrace_flags();
2564 
2565     ciMethod* target = ci_env.get_method_from_handle(target_handle);
2566 
2567     TraceTime t1("compilation", &time);
2568     EventCompilation event;
2569 
2570     if (comp == nullptr) {
2571       ci_env.record_method_not_compilable("no compiler");
2572     } else if (!ci_env.failing()) {
2573       if (WhiteBoxAPI && WhiteBox::compilation_locked) {
2574         whitebox_lock_compilation();
2575       }
2576       comp->compile_method(&ci_env, target, osr_bci, true, directive);
2577 
2578       /* Repeat compilation without installing code for profiling purposes */
2579       int repeat_compilation_count = directive->RepeatCompilationOption;
2580       while (repeat_compilation_count > 0) {
2581         ResourceMark rm(thread);
2582         task->print_ul("NO CODE INSTALLED");
2583         comp->compile_method(&ci_env, target, osr_bci, false, directive);
2584         repeat_compilation_count--;
2585       }
2586     }
2587 
2588 
2589     if (!ci_env.failing() && !task->is_success() && !task->is_precompile()) {
2590       assert(ci_env.failure_reason() != nullptr, "expect failure reason");
2591       assert(false, "compiler should always document failure: %s", ci_env.failure_reason());
2592       // The compiler elected, without comment, not to register a result.
2593       // Do not attempt further compilations of this method.
2594       ci_env.record_method_not_compilable("compile failed");
2595     }
2596 
2597     // Copy this bit to the enclosing block:
2598     compilable = ci_env.compilable();
2599 
2600     if (ci_env.failing()) {
2601       // Duplicate the failure reason string, so that it outlives ciEnv
2602       failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler);
2603       failure_reason_on_C_heap = true;
2604       retry_message = ci_env.retry_message();
2605       ci_env.report_failure(failure_reason);
2606     }
2607 
2608     if (ci_env.failing()) {
2609       handle_compile_error(thread, task, &ci_env, compilable, failure_reason);
2610     }
2611     if (event.should_commit()) {
2612       post_compilation_event(event, task);
2613     }
2614   }
2615 
2616   if (failure_reason != nullptr) {
2617     task->set_failure_reason(failure_reason, failure_reason_on_C_heap);
2618     if (CompilationLog::log() != nullptr) {
2619       CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message);
2620     }
2621     if (PrintCompilation || directive->PrintCompilationOption) {
2622       FormatBufferResource msg = retry_message != nullptr ?
2623         FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) :
2624         FormatBufferResource("COMPILE SKIPPED: %s",      failure_reason);
2625       task->print(tty, msg);
2626     }
2627   }
2628 
2629   task->mark_finished(os::elapsed_counter());
2630   DirectivesStack::release(directive);
2631 
2632   methodHandle method(thread, task->method());
2633 
2634   DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success());
2635 
2636   collect_statistics(thread, time, task);
2637 
2638   if (PrintCompilation && PrintCompilation2) {
2639     tty->print("%7d ", (int) tty->time_stamp().milliseconds());  // print timestamp
2640     tty->print("%4d ", compile_id);    // print compilation number
2641     tty->print("%s ", (is_osr ? "%" : (task->is_aot_load() ? (task->preload() ? "P" : "A") : " ")));
2642     if (task->is_success()) {
2643       tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size());
2644     }
2645     tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes());
2646   }
2647 
2648   Log(compilation, codecache) log;
2649   if (log.is_debug()) {
2650     LogStream ls(log.debug());
2651     codecache_print(&ls, /* detailed= */ false);
2652   }
2653   if (PrintCodeCacheOnCompilation) {
2654     codecache_print(/* detailed= */ false);
2655   }
2656   // Disable compilation, if required.
2657   switch (compilable) {
2658   case ciEnv::MethodCompilable_never:
2659     if (is_osr)
2660       method->set_not_osr_compilable_quietly("MethodCompilable_never");
2661     else
2662       method->set_not_compilable_quietly("MethodCompilable_never");
2663     break;
2664   case ciEnv::MethodCompilable_not_at_tier:
2665     if (is_osr)
2666       method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level);
2667     else
2668       method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level);
2669     break;
2670   }
2671 
2672   // Note that the queued_for_compilation bits are cleared without
2673   // protection of a mutex. [They were set by the requester thread,
2674   // when adding the task to the compile queue -- at which time the
2675   // compile queue lock was held. Subsequently, we acquired the compile
2676   // queue lock to get this task off the compile queue; thus (to belabour
2677   // the point somewhat) our clearing of the bits must be occurring
2678   // only after the setting of the bits. See also 14012000 above.
2679   method->clear_queued_for_compilation();
2680   method->set_pending_queue_processed(false);
2681 
2682   if (should_print_compilation) {
2683     ResourceMark rm;
2684     task->print_tty();
2685   }
2686 }
2687 
2688 /**
2689  * The CodeCache is full. Print warning and disable compilation.
2690  * Schedule code cache cleaning so compilation can continue later.
2691  * This function needs to be called only from CodeCache::allocate(),
2692  * since we currently handle a full code cache uniformly.
2693  */
2694 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) {
2695   UseInterpreter = true;
2696   if (UseCompiler || AlwaysCompileLoopMethods ) {
2697     if (xtty != nullptr) {
2698       stringStream s;
2699       // Dump code cache state into a buffer before locking the tty,
2700       // because log_state() will use locks causing lock conflicts.
2701       CodeCache::log_state(&s);
2702       // Lock to prevent tearing
2703       ttyLocker ttyl;
2704       xtty->begin_elem("code_cache_full");
2705       xtty->print("%s", s.freeze());
2706       xtty->stamp();
2707       xtty->end_elem();
2708     }
2709 
2710 #ifndef PRODUCT
2711     if (ExitOnFullCodeCache) {
2712       codecache_print(/* detailed= */ true);
2713       before_exit(JavaThread::current());
2714       exit_globals(); // will delete tty
2715       vm_direct_exit(1);
2716     }
2717 #endif
2718     if (UseCodeCacheFlushing) {
2719       // Since code cache is full, immediately stop new compiles
2720       if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
2721         log_info(codecache)("Code cache is full - disabling compilation");
2722       }
2723     } else {
2724       disable_compilation_forever();
2725     }
2726 
2727     CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
2728   }
2729 }
2730 
2731 // ------------------------------------------------------------------
2732 // CompileBroker::update_compile_perf_data
2733 //
2734 // Record this compilation for debugging purposes.
2735 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) {
2736   ResourceMark rm;
2737   char* method_name = method->name()->as_C_string();
2738   char current_method[CompilerCounters::cmname_buffer_length];
2739   size_t maxLen = CompilerCounters::cmname_buffer_length;
2740 
2741   const char* class_name = method->method_holder()->name()->as_C_string();
2742 
2743   size_t s1len = strlen(class_name);
2744   size_t s2len = strlen(method_name);
2745 
2746   // check if we need to truncate the string
2747   if (s1len + s2len + 2 > maxLen) {
2748 
2749     // the strategy is to lop off the leading characters of the
2750     // class name and the trailing characters of the method name.
2751 
2752     if (s2len + 2 > maxLen) {
2753       // lop of the entire class name string, let snprintf handle
2754       // truncation of the method name.
2755       class_name += s1len; // null string
2756     }
2757     else {
2758       // lop off the extra characters from the front of the class name
2759       class_name += ((s1len + s2len + 2) - maxLen);
2760     }
2761   }
2762 
2763   jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
2764 
2765   int last_compile_type = normal_compile;
2766   if (CICountOSR && is_osr) {
2767     last_compile_type = osr_compile;
2768   } else if (CICountNative && method->is_native()) {
2769     last_compile_type = native_compile;
2770   }
2771 
2772   CompilerCounters* counters = thread->counters();
2773   counters->set_current_method(current_method);
2774   counters->set_compile_type((jlong) last_compile_type);
2775 }
2776 
2777 // ------------------------------------------------------------------
2778 // CompileBroker::collect_statistics
2779 //
2780 // Collect statistics about the compilation.
2781 
2782 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) {
2783   bool success = task->is_success();
2784   methodHandle method (thread, task->method());
2785   int compile_id = task->compile_id();
2786   bool is_osr = (task->osr_bci() != standard_entry_bci);
2787   const int comp_level = task->comp_level();
2788   CompilerCounters* counters = thread->counters();
2789 
2790   MutexLocker locker(CompileStatistics_lock);
2791 
2792   // _perf variables are production performance counters which are
2793   // updated regardless of the setting of the CITime and CITimeEach flags
2794   //
2795 
2796   // account all time, including bailouts and failures in this counter;
2797   // C1 and C2 counters are counting both successful and unsuccessful compiles
2798   _t_total_compilation.add(&time);
2799 
2800   // Update compilation times. Used by the implementation of JFR CompilerStatistics
2801   // and java.lang.management.CompilationMXBean.
2802   _perf_total_compilation->inc(time.ticks());
2803   _peak_compilation_time = MAX2(time.milliseconds(), _peak_compilation_time);
2804 
2805   if (!success) {
2806     _total_bailout_count++;
2807     if (UsePerfData) {
2808       _perf_last_failed_method->set_value(counters->current_method());
2809       _perf_last_failed_type->set_value(counters->compile_type());
2810       _perf_total_bailout_count->inc();
2811     }
2812     _t_bailedout_compilation.add(&time);
2813 
2814     if (CITime || log_is_enabled(Info, init)) {
2815       CompilerStatistics* stats = nullptr;
2816       if (task->is_aot_load()) {
2817         int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1);
2818         stats = &_aot_stats_per_level[level];
2819       } else {
2820         stats = &_stats_per_level[comp_level-1];
2821       }
2822       stats->_bailout.update(time, 0);
2823     }
2824   } else if (!task->is_success()) {
2825     if (UsePerfData) {
2826       _perf_last_invalidated_method->set_value(counters->current_method());
2827       _perf_last_invalidated_type->set_value(counters->compile_type());
2828       _perf_total_invalidated_count->inc();
2829     }
2830     _total_invalidated_count++;
2831     _t_invalidated_compilation.add(&time);
2832 
2833     if (CITime || log_is_enabled(Info, init)) {
2834       CompilerStatistics* stats = nullptr;
2835       if (task->is_aot_load()) {
2836         int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1);
2837         stats = &_aot_stats_per_level[level];
2838       } else {
2839         stats = &_stats_per_level[comp_level-1];
2840       }
2841       stats->_invalidated.update(time, 0);
2842     }
2843   } else {
2844     // Compilation succeeded
2845     if (CITime || log_is_enabled(Info, init)) {
2846       int bytes_compiled = method->code_size() + task->num_inlined_bytecodes();
2847       if (is_osr) {
2848         _t_osr_compilation.add(&time);
2849         _sum_osr_bytes_compiled += bytes_compiled;
2850       } else {
2851         _t_standard_compilation.add(&time);
2852         _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
2853       }
2854 
2855       // Collect statistic per compilation level
2856       if (task->is_aot_load()) {
2857         _aot_stats._standard.update(time, bytes_compiled);
2858         _aot_stats._nmethods_size += task->nm_total_size();
2859         _aot_stats._nmethods_code_size += task->nm_insts_size();
2860         int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1);
2861         CompilerStatistics* stats = &_aot_stats_per_level[level];
2862         stats->_standard.update(time, bytes_compiled);
2863         stats->_nmethods_size += task->nm_total_size();
2864         stats->_nmethods_code_size += task->nm_insts_size();
2865       } else if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) {
2866         CompilerStatistics* stats = &_stats_per_level[comp_level-1];
2867         if (is_osr) {
2868           stats->_osr.update(time, bytes_compiled);
2869         } else {
2870           stats->_standard.update(time, bytes_compiled);
2871         }
2872         stats->_nmethods_size += task->nm_total_size();
2873         stats->_nmethods_code_size += task->nm_insts_size();
2874       } else {
2875         assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level);
2876       }
2877 
2878       // Collect statistic per compiler
2879       AbstractCompiler* comp = task->compiler();
2880       if (comp && !task->is_aot_load()) {
2881         CompilerStatistics* stats = comp->stats();
2882         if (is_osr) {
2883           stats->_osr.update(time, bytes_compiled);
2884         } else {
2885           stats->_standard.update(time, bytes_compiled);
2886         }
2887         stats->_nmethods_size += task->nm_total_size();
2888         stats->_nmethods_code_size += task->nm_insts_size();
2889       } else if (!task->is_aot_load()) { // if (!comp)
2890         assert(false, "Compiler object must exist");
2891       }
2892     }
2893 
2894     if (UsePerfData) {
2895       // save the name of the last method compiled
2896       _perf_last_method->set_value(counters->current_method());
2897       _perf_last_compile_type->set_value(counters->compile_type());
2898       _perf_last_compile_size->set_value(method->code_size() +
2899                                          task->num_inlined_bytecodes());
2900       if (is_osr) {
2901         _perf_osr_compilation->inc(time.ticks());
2902         _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes());
2903       } else {
2904         _perf_standard_compilation->inc(time.ticks());
2905         _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes());
2906       }
2907     }
2908 
2909     if (CITimeEach) {
2910       double compile_time = time.seconds();
2911       double bytes_per_sec = compile_time == 0.0 ? 0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time;
2912       tty->print_cr("%3d   seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)",
2913                     compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes());
2914     }
2915 
2916     // Collect counts of successful compilations
2917     _sum_nmethod_size      += task->nm_total_size();
2918     _sum_nmethod_code_size += task->nm_insts_size();
2919     _total_compile_count++;
2920 
2921     if (UsePerfData) {
2922       _perf_sum_nmethod_size->inc(     task->nm_total_size());
2923       _perf_sum_nmethod_code_size->inc(task->nm_insts_size());
2924       _perf_total_compile_count->inc();
2925     }
2926 
2927     if (is_osr) {
2928       if (UsePerfData) _perf_total_osr_compile_count->inc();
2929       _total_osr_compile_count++;
2930     } else {
2931       if (UsePerfData) _perf_total_standard_compile_count->inc();
2932       _total_standard_compile_count++;
2933     }
2934   }
2935   // set the current method for the thread to null
2936   if (UsePerfData) counters->set_current_method("");
2937 }
2938 
2939 const char* CompileBroker::compiler_name(int comp_level) {
2940   AbstractCompiler *comp = CompileBroker::compiler(comp_level);
2941   if (comp == nullptr) {
2942     return "no compiler";
2943   } else {
2944     return (comp->name());
2945   }
2946 }
2947 
2948 jlong CompileBroker::total_compilation_ticks() {
2949   return _perf_total_compilation != nullptr ? _perf_total_compilation->get_value() : 0;
2950 }
2951 
2952 void CompileBroker::log_not_entrant(nmethod* nm) {
2953   _total_not_entrant_count++;
2954   if (CITime || log_is_enabled(Info, init)) {
2955     CompilerStatistics* stats = nullptr;
2956     int level = nm->comp_level();
2957     if (nm->is_aot()) {
2958       if (nm->preloaded()) {
2959         assert(level == CompLevel_full_optimization, "%d", level);
2960         level = CompLevel_full_optimization + 1;
2961       }
2962       stats = &_aot_stats_per_level[level - 1];
2963     } else {
2964       stats = &_stats_per_level[level - 1];
2965     }
2966     stats->_made_not_entrant._count++;
2967   }
2968 }
2969 
2970 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) {
2971   tty->print_cr("  %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}",
2972                 name, stats->bytes_per_second(),
2973                 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count,
2974                 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count,
2975                 stats->_nmethods_size, stats->_nmethods_code_size);
2976 }
2977 
2978 static void print_helper(outputStream* st, const char* name, CompilerStatistics::Data data, bool print_time = true) {
2979   if (data._count > 0) {
2980     st->print("; %s: %4u methods", name, data._count);
2981     if (print_time) {
2982       st->print(" (in %.3fs)", data._time.seconds());
2983     }
2984   }
2985 }
2986 
2987 static void print_tier_helper(outputStream* st, const char* prefix, int tier, CompilerStatistics* stats) {
2988   st->print("    %s%d: %5u methods", prefix, tier, stats->_standard._count);
2989   if (stats->_standard._count > 0) {
2990     st->print(" (in %.3fs)", stats->_standard._time.seconds());
2991   }
2992   print_helper(st, "osr",     stats->_osr);
2993   print_helper(st, "bailout", stats->_bailout);
2994   print_helper(st, "invalid", stats->_invalidated);
2995   print_helper(st, "not_entrant", stats->_made_not_entrant, false);
2996   st->cr();
2997 }
2998 
2999 static void print_queue_info(outputStream* st, CompileQueue* queue) {
3000   if (queue != nullptr) {
3001     MutexLocker ml(queue->lock());
3002 
3003     uint  total_cnt = 0;
3004     uint active_cnt = 0;
3005     for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) {
3006       guarantee(jt != nullptr, "");
3007       if (jt->is_Compiler_thread()) {
3008         CompilerThread* ct = (CompilerThread*)jt;
3009 
3010         guarantee(ct != nullptr, "");
3011         if (ct->queue() == queue) {
3012           ++total_cnt;
3013           CompileTask* task = ct->task();
3014           if (task != nullptr) {
3015             ++active_cnt;
3016           }
3017         }
3018       }
3019     }
3020 
3021     st->print("  %s (%d active / %d total threads): %u tasks",
3022               queue->name(), active_cnt, total_cnt, queue->size());
3023     if (queue->size() > 0) {
3024       uint counts[] = {0, 0, 0, 0, 0}; // T1 ... T5
3025       for (CompileTask* task = queue->first(); task != nullptr; task = task->next()) {
3026         int tier = task->comp_level();
3027         if (task->is_aot_load() && task->preload()) {
3028           assert(tier == CompLevel_full_optimization, "%d", tier);
3029           tier = CompLevel_full_optimization + 1;
3030         }
3031         counts[tier-1]++;
3032       }
3033       st->print(":");
3034       for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
3035         uint cnt = counts[tier-1];
3036         if (cnt > 0) {
3037           st->print(" T%d: %u tasks;", tier, cnt);
3038         }
3039       }
3040     }
3041     st->cr();
3042 
3043 //    for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) {
3044 //      guarantee(jt != nullptr, "");
3045 //      if (jt->is_Compiler_thread()) {
3046 //        CompilerThread* ct = (CompilerThread*)jt;
3047 //
3048 //        guarantee(ct != nullptr, "");
3049 //        if (ct->queue() == queue) {
3050 //          ResourceMark rm;
3051 //          CompileTask* task = ct->task();
3052 //          st->print("    %s: ", ct->name_raw());
3053 //          if (task != nullptr) {
3054 //            task->print(st, nullptr, true /*short_form*/, false /*cr*/);
3055 //          }
3056 //          st->cr();
3057 //        }
3058 //      }
3059 //    }
3060   }
3061 }
3062 void CompileBroker::print_statistics_on(outputStream* st) {
3063   st->print_cr("  Total: %u methods; %u bailouts, %u invalidated, %u non_entrant",
3064                _total_compile_count, _total_bailout_count, _total_invalidated_count, _total_not_entrant_count);
3065   for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
3066     print_tier_helper(st, "Tier", tier, &_stats_per_level[tier-1]);
3067   }
3068   st->cr();
3069 
3070   if (AOTCodeCaching) {
3071     for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
3072       if (tier != CompLevel_full_profile) {
3073         print_tier_helper(st, "AOT Code T", tier, &_aot_stats_per_level[tier - 1]);
3074       }
3075     }
3076     st->cr();
3077   }
3078 
3079   print_queue_info(st, _c1_compile_queue);
3080   print_queue_info(st, _c2_compile_queue);
3081   print_queue_info(st, _ac1_compile_queue);
3082   print_queue_info(st, _ac2_compile_queue);
3083 }
3084 
3085 void CompileBroker::print_times(bool per_compiler, bool aggregate) {
3086   if (per_compiler) {
3087     if (aggregate) {
3088       tty->cr();
3089       tty->print_cr("[%dms] Individual compiler times (for compiled methods only)", (int)tty->time_stamp().milliseconds());
3090       tty->print_cr("------------------------------------------------");
3091       tty->cr();
3092     }
3093     for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) {
3094       AbstractCompiler* comp = _compilers[i];
3095       if (comp != nullptr) {
3096         print_times(comp->name(), comp->stats());
3097       }
3098     }
3099     if (_aot_stats._standard._count > 0) {
3100       print_times("SC", &_aot_stats);
3101     }
3102     if (aggregate) {
3103       tty->cr();
3104       tty->print_cr("Individual compilation Tier times (for compiled methods only)");
3105       tty->print_cr("------------------------------------------------");
3106       tty->cr();
3107     }
3108     char tier_name[256];
3109     for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
3110       CompilerStatistics* stats = &_stats_per_level[tier-1];
3111       os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier);
3112       print_times(tier_name, stats);
3113     }
3114     for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
3115       CompilerStatistics* stats = &_aot_stats_per_level[tier-1];
3116       if (stats->_standard._bytes > 0) {
3117         os::snprintf_checked(tier_name, sizeof(tier_name), "AOT Code T%d", tier);
3118         print_times(tier_name, stats);
3119       }
3120     }
3121   }
3122 
3123   if (!aggregate) {
3124     return;
3125   }
3126 
3127   elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation;
3128   elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation;
3129   elapsedTimer total_compilation = CompileBroker::_t_total_compilation;
3130 
3131   uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled;
3132   uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled;
3133 
3134   uint standard_compile_count = CompileBroker::_total_standard_compile_count;
3135   uint osr_compile_count = CompileBroker::_total_osr_compile_count;
3136   uint total_compile_count = CompileBroker::_total_compile_count;
3137   uint total_bailout_count = CompileBroker::_total_bailout_count;
3138   uint total_invalidated_count = CompileBroker::_total_invalidated_count;
3139 
3140   uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size;
3141   uint nmethods_size = CompileBroker::_sum_nmethod_size;
3142 
3143   tty->cr();
3144   tty->print_cr("Accumulated compiler times");
3145   tty->print_cr("----------------------------------------------------------");
3146                //0000000000111111111122222222223333333333444444444455555555556666666666
3147                //0123456789012345678901234567890123456789012345678901234567890123456789
3148   tty->print_cr("  Total compilation time   : %7.3f s", total_compilation.seconds());
3149   tty->print_cr("    Standard compilation   : %7.3f s, Average : %2.3f s",
3150                 standard_compilation.seconds(),
3151                 standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count);
3152   tty->print_cr("    Bailed out compilation : %7.3f s, Average : %2.3f s",
3153                 CompileBroker::_t_bailedout_compilation.seconds(),
3154                 total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count);
3155   tty->print_cr("    On stack replacement   : %7.3f s, Average : %2.3f s",
3156                 osr_compilation.seconds(),
3157                 osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count);
3158   tty->print_cr("    Invalidated            : %7.3f s, Average : %2.3f s",
3159                 CompileBroker::_t_invalidated_compilation.seconds(),
3160                 total_invalidated_count == 0 ? 0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count);
3161 
3162   if (AOTCodeCaching) { // Check flags because AOT code cache could be closed already
3163     tty->cr();
3164     AOTCodeCache::print_timers_on(tty);
3165   }
3166   AbstractCompiler *comp = compiler(CompLevel_simple);
3167   if (comp != nullptr) {
3168     tty->cr();
3169     comp->print_timers();
3170   }
3171   comp = compiler(CompLevel_full_optimization);
3172   if (comp != nullptr) {
3173     tty->cr();
3174     comp->print_timers();
3175   }
3176 #if INCLUDE_JVMCI
3177   if (EnableJVMCI) {
3178     JVMCICompiler *jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null());
3179     if (jvmci_comp != nullptr && jvmci_comp != comp) {
3180       tty->cr();
3181       jvmci_comp->print_timers();
3182     }
3183   }
3184 #endif
3185 
3186   tty->cr();
3187   tty->print_cr("  Total compiled methods    : %8u methods", total_compile_count);
3188   tty->print_cr("    Standard compilation    : %8u methods", standard_compile_count);
3189   tty->print_cr("    On stack replacement    : %8u methods", osr_compile_count);
3190   uint tcb = osr_bytes_compiled + standard_bytes_compiled;
3191   tty->print_cr("  Total compiled bytecodes  : %8u bytes", tcb);
3192   tty->print_cr("    Standard compilation    : %8u bytes", standard_bytes_compiled);
3193   tty->print_cr("    On stack replacement    : %8u bytes", osr_bytes_compiled);
3194   double tcs = total_compilation.seconds();
3195   uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs);
3196   tty->print_cr("  Average compilation speed : %8u bytes/s", bps);
3197   tty->cr();
3198   tty->print_cr("  nmethod code size         : %8u bytes", nmethods_code_size);
3199   tty->print_cr("  nmethod total size        : %8u bytes", nmethods_size);
3200 }
3201 
3202 // Print general/accumulated JIT information.
3203 void CompileBroker::print_info(outputStream *out) {
3204   if (out == nullptr) out = tty;
3205   out->cr();
3206   out->print_cr("======================");
3207   out->print_cr("   General JIT info   ");
3208   out->print_cr("======================");
3209   out->cr();
3210   out->print_cr("            JIT is : %7s",     should_compile_new_jobs() ? "on" : "off");
3211   out->print_cr("  Compiler threads : %7d",     (int)CICompilerCount);
3212   out->cr();
3213   out->print_cr("CodeCache overview");
3214   out->print_cr("--------------------------------------------------------");
3215   out->cr();
3216   out->print_cr("         Reserved size : %7zu KB", CodeCache::max_capacity() / K);
3217   out->print_cr("        Committed size : %7zu KB", CodeCache::capacity() / K);
3218   out->print_cr("  Unallocated capacity : %7zu KB", CodeCache::unallocated_capacity() / K);
3219   out->cr();
3220 }
3221 
3222 // Note: tty_lock must not be held upon entry to this function.
3223 //       Print functions called from herein do "micro-locking" on tty_lock.
3224 //       That's a tradeoff which keeps together important blocks of output.
3225 //       At the same time, continuous tty_lock hold time is kept in check,
3226 //       preventing concurrently printing threads from stalling a long time.
3227 void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
3228   TimeStamp ts_total;
3229   TimeStamp ts_global;
3230   TimeStamp ts;
3231 
3232   bool allFun = !strcmp(function, "all");
3233   bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
3234   bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
3235   bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
3236   bool methodCount = !strcmp(function, "MethodCount") || allFun;
3237   bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
3238   bool methodAge = !strcmp(function, "MethodAge") || allFun;
3239   bool methodNames = !strcmp(function, "MethodNames") || allFun;
3240   bool discard = !strcmp(function, "discard") || allFun;
3241 
3242   if (out == nullptr) {
3243     out = tty;
3244   }
3245 
3246   if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
3247     out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
3248     out->cr();
3249     return;
3250   }
3251 
3252   ts_total.update(); // record starting point
3253 
3254   if (aggregate) {
3255     print_info(out);
3256   }
3257 
3258   // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
3259   // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
3260   // When we request individual parts of the analysis via the jcmd interface, it is possible
3261   // that in between another thread (another jcmd user or the vm running into CodeCache OOM)
3262   // updated the aggregated data. We will then see a modified, but again consistent, view
3263   // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
3264   // a lock across user interaction.
3265 
3266   // We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock.
3267   // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
3268   // leading to an unnecessarily long hold time of the other locks we acquired before.
3269   ts.update(); // record starting point
3270   MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
3271   out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());
3272 
3273   // Holding the CodeCache_lock protects from concurrent alterations of the CodeCache.
3274   // Unfortunately, such protection is not sufficient:
3275   // When a new nmethod is created via ciEnv::register_method(), the
3276   // Compile_lock is taken first. After some initializations,
3277   // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
3278   // immediately (after finalizing the oop references). To lock out concurrent
3279   // modifiers, we have to grab both locks as well in the described sequence.
3280   //
3281   // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
3282   // for the entire duration of aggregation and printing. That makes sure we see
3283   // a consistent picture and do not run into issues caused by concurrent alterations.
3284   bool should_take_Compile_lock   = !SafepointSynchronize::is_at_safepoint() &&
3285                                     !Compile_lock->owned_by_self();
3286   bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
3287                                     !CodeCache_lock->owned_by_self();
3288   bool take_global_lock_1   =  allFun && should_take_Compile_lock;
3289   bool take_global_lock_2   =  allFun && should_take_CodeCache_lock;
3290   bool take_function_lock_1 = !allFun && should_take_Compile_lock;
3291   bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
3292   bool take_global_locks    = take_global_lock_1 || take_global_lock_2;
3293   bool take_function_locks  = take_function_lock_1 || take_function_lock_2;
3294 
3295   ts_global.update(); // record starting point
3296 
3297   ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
3298   ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
3299   if (take_global_locks) {
3300     out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
3301     ts_global.update(); // record starting point
3302   }
3303 
3304   if (aggregate) {
3305     ts.update(); // record starting point
3306     ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1,  Mutex::_safepoint_check_flag);
3307     ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
3308     if (take_function_locks) {
3309       out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
3310     }
3311 
3312     ts.update(); // record starting point
3313     CodeCache::aggregate(out, granularity);
3314     if (take_function_locks) {
3315       out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
3316     }
3317   }
3318 
3319   if (usedSpace) CodeCache::print_usedSpace(out);
3320   if (freeSpace) CodeCache::print_freeSpace(out);
3321   if (methodCount) CodeCache::print_count(out);
3322   if (methodSpace) CodeCache::print_space(out);
3323   if (methodAge) CodeCache::print_age(out);
3324   if (methodNames) {
3325     if (allFun) {
3326       // print_names() can only be used safely if the locks have been continuously held
3327       // since aggregation begin. That is true only for function "all".
3328       CodeCache::print_names(out);
3329     } else {
3330       out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
3331     }
3332   }
3333   if (discard) CodeCache::discard(out);
3334 
3335   if (take_global_locks) {
3336     out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
3337   }
3338   out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
3339 }