/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLinkedClassBulkLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/aotCodeCache.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerEvent.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "gc/shared/memAllocator.hpp"
#include "interpreter/linkResolver.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/escapeBarrier.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "services/management.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/nonblockingQueue.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciEnv.hpp"
#include "jvmci/jvmciRuntime.hpp"
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
  {                                                                      \
    Symbol* klass_name = (method)->klass_name();                         \
    Symbol* name = (method)->name();                                     \
    Symbol* signature = (method)->signature();                           \
    HOTSPOT_METHOD_COMPILE_BEGIN(                                        \
      (char *) comp_name, strlen(comp_name),                             \
      (char *) klass_name->bytes(), klass_name->utf8_length(),           \
      (char *) name->bytes(), name->utf8_length(),                       \
      (char *) signature->bytes(), signature->utf8_length());            \
  }

#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)      \
  {                                                                      \
    Symbol* klass_name = (method)->klass_name();                         \
    Symbol* name = (method)->name();                                     \
    Symbol* signature = (method)->signature();                           \
    HOTSPOT_METHOD_COMPILE_END(                                          \
      (char *) comp_name, strlen(comp_name),                             \
      (char *) klass_name->bytes(), klass_name->utf8_length(),           \
      (char *) name->bytes(), name->utf8_length(),                       \
      (char *) signature->bytes(), signature->utf8_length(), (success)); \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)

#endif // ndef DTRACE_ENABLED

bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false;
volatile int CompileBroker::_print_compilation_warning = 0;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;

// The installed compiler(s)
AbstractCompiler* CompileBroker::_compilers[2];

// The maximum numbers of compiler threads to be determined during startup.
int CompileBroker::_c1_count = 0;
int CompileBroker::_c2_count = 0;
int CompileBroker::_ac_count = 0;

// An array of compiler names as Java String objects
jobject* CompileBroker::_compiler1_objects = nullptr;
jobject* CompileBroker::_compiler2_objects = nullptr;
jobject* CompileBroker::_ac_objects = nullptr;

CompileLog** CompileBroker::_compiler1_logs = nullptr;
CompileLog** CompileBroker::_compiler2_logs = nullptr;
CompileLog** CompileBroker::_ac_logs = nullptr;

// These counters are used to assign a unique ID to each compilation.
volatile jint CompileBroker::_compilation_id = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;
volatile jint CompileBroker::_native_compilation_id = 0;

// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = nullptr;
PerfCounter* CompileBroker::_perf_osr_compilation = nullptr;
PerfCounter* CompileBroker::_perf_standard_compilation = nullptr;

PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr;
PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr;
PerfCounter* CompileBroker::_perf_total_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr;

PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr;

PerfStringVariable* CompileBroker::_perf_last_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_type = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_size = nullptr;
PerfVariable* CompileBroker::_perf_last_failed_type = nullptr;
PerfVariable* CompileBroker::_perf_last_invalidated_type = nullptr;

// Timers and counters for generating statistics
elapsedTimer CompileBroker::_t_total_compilation;
elapsedTimer CompileBroker::_t_osr_compilation;
elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;

uint CompileBroker::_total_bailout_count = 0;
uint CompileBroker::_total_invalidated_count = 0;
uint CompileBroker::_total_not_entrant_count = 0;
uint CompileBroker::_total_compile_count = 0;
uint CompileBroker::_total_osr_compile_count = 0;
uint CompileBroker::_total_standard_compile_count = 0;
uint CompileBroker::_total_compiler_stopped_count = 0;
uint CompileBroker::_total_compiler_restarted_count = 0;

uint CompileBroker::_sum_osr_bytes_compiled = 0;
uint CompileBroker::_sum_standard_bytes_compiled = 0;
uint CompileBroker::_sum_nmethod_size = 0;
uint CompileBroker::_sum_nmethod_code_size = 0;

jlong CompileBroker::_peak_compilation_time = 0;

CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
CompilerStatistics CompileBroker::_aot_stats;
CompilerStatistics CompileBroker::_aot_stats_per_level[CompLevel_full_optimization + 1];

CompileQueue* CompileBroker::_c2_compile_queue = nullptr;
CompileQueue* CompileBroker::_c1_compile_queue = nullptr;
CompileQueue* CompileBroker::_ac1_compile_queue = nullptr;
CompileQueue* CompileBroker::_ac2_compile_queue = nullptr;

bool compileBroker_init() {
  if (LogEvents) {
    CompilationLog::init();
  }

  // init directives stack, adding default directive
  DirectivesStack::init();

  if (DirectivesParser::has_file()) {
    return DirectivesParser::parse_from_flag();
  } else if (CompilerDirectivesPrint) {
    // Print default directive even when no other was added
    DirectivesStack::print(tty);
  }

  return true;
}

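// CompileTaskWrapper is a scope object used by a compiler thread while it
// works on a CompileTask. The constructor attaches the task to the current
// CompilerThread, arms the compilation timeout and logs the task start; the
// destructor disarms the timeout, logs completion, detaches the task and then
// cleans it up: a blocking task is normally freed by the waiting thread, which
// is notified here (unless the JVMCI waiter already timed out, in which case
// the task is deleted here), while a non-blocking task is, by convention,
// deleted by the compiling thread. The wrapper is also used to free stale
// tasks, see CompileQueue::purge_stale_tasks() below.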
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) { 232 CompilerThread* thread = CompilerThread::current(); 233 thread->set_task(task); 234 CompileLog* log = thread->log(); 235 thread->timeout()->arm(); 236 if (log != nullptr && !task->is_unloaded()) task->log_task_start(log); 237 } 238 239 CompileTaskWrapper::~CompileTaskWrapper() { 240 CompilerThread* thread = CompilerThread::current(); 241 242 // First, disarm the timeout. This still relies on the underlying task. 243 thread->timeout()->disarm(); 244 245 CompileTask* task = thread->task(); 246 CompileLog* log = thread->log(); 247 AbstractCompiler* comp = thread->compiler(); 248 if (log != nullptr && !task->is_unloaded()) task->log_task_done(log); 249 thread->set_task(nullptr); 250 thread->set_env(nullptr); 251 if (task->is_blocking()) { 252 bool free_task = false; 253 { 254 MutexLocker notifier(thread, CompileTaskWait_lock); 255 task->mark_complete(); 256 #if INCLUDE_JVMCI 257 if (comp->is_jvmci()) { 258 if (!task->has_waiter()) { 259 // The waiting thread timed out and thus did not delete the task. 260 free_task = true; 261 } 262 task->set_blocking_jvmci_compile_state(nullptr); 263 } 264 #endif 265 if (!free_task) { 266 // Notify the waiting thread that the compilation has completed 267 // so that it can free the task. 268 CompileTaskWait_lock->notify_all(); 269 } 270 } 271 if (free_task) { 272 // The task can only be deleted once the task lock is released. 273 delete task; 274 } 275 } else { 276 task->mark_complete(); 277 278 // By convention, the compiling thread is responsible for deleting 279 // a non-blocking CompileTask. 280 delete task; 281 } 282 } 283 284 /** 285 * Check if a CompilerThread can be removed and update count if requested. 286 */ 287 bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) { 288 assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here"); 289 if (!ReduceNumberOfCompilerThreads) return false; 290 291 if (RecompilationPolicy::have_recompilation_work()) return false; 292 293 AbstractCompiler *compiler = ct->compiler(); 294 int compiler_count = compiler->num_compiler_threads(); 295 bool c1 = compiler->is_c1(); 296 297 // Keep at least 1 compiler thread of each type. 298 if (compiler_count < 2) return false; 299 300 // Keep thread alive for at least some time. 301 if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false; 302 303 #if INCLUDE_JVMCI 304 if (compiler->is_jvmci() && !UseJVMCINativeLibrary) { 305 // Handles for JVMCI thread objects may get released concurrently. 306 if (do_it) { 307 assert(CompileThread_lock->owner() == ct, "must be holding lock"); 308 } else { 309 // Skip check if it's the last thread and let caller check again. 310 return true; 311 } 312 } 313 #endif 314 315 // We only allow the last compiler thread of each type to get removed. 316 jobject last_compiler = c1 ? compiler1_object(compiler_count - 1) 317 : compiler2_object(compiler_count - 1); 318 if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) { 319 if (do_it) { 320 assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent. 321 compiler->set_num_compiler_threads(compiler_count - 1); 322 #if INCLUDE_JVMCI 323 if (compiler->is_jvmci() && !UseJVMCINativeLibrary) { 324 // Old j.l.Thread object can die when no longer referenced elsewhere. 
325 JNIHandles::destroy_global(compiler2_object(compiler_count - 1)); 326 _compiler2_objects[compiler_count - 1] = nullptr; 327 } 328 #endif 329 } 330 return true; 331 } 332 return false; 333 } 334 335 /** 336 * Add a CompileTask to a CompileQueue. 337 */ 338 void CompileQueue::add(CompileTask* task) { 339 assert(_lock->owned_by_self(), "must own lock"); 340 341 task->set_next(nullptr); 342 task->set_prev(nullptr); 343 344 if (_last == nullptr) { 345 // The compile queue is empty. 346 assert(_first == nullptr, "queue is empty"); 347 _first = task; 348 _last = task; 349 } else { 350 // Append the task to the queue. 351 assert(_last->next() == nullptr, "not last"); 352 _last->set_next(task); 353 task->set_prev(_last); 354 _last = task; 355 } 356 ++_size; 357 ++_total_added; 358 if (_size > _peak_size) { 359 _peak_size = _size; 360 } 361 362 // Mark the method as being in the compile queue. 363 task->method()->set_queued_for_compilation(); 364 365 task->mark_queued(os::elapsed_counter()); 366 367 if (CIPrintCompileQueue) { 368 print_tty(); 369 } 370 371 if (LogCompilation && xtty != nullptr) { 372 task->log_task_queued(); 373 } 374 375 if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) { 376 CompileTrainingData* ctd = CompileTrainingData::make(task); 377 if (ctd != nullptr) { 378 task->set_training_data(ctd); 379 } 380 } 381 382 // Notify CompilerThreads that a task is available. 383 _lock->notify_all(); 384 } 385 386 void CompileQueue::add_pending(CompileTask* task) { 387 assert(_lock->owned_by_self() == false, "must NOT own lock"); 388 assert(UseLockFreeCompileQueues, ""); 389 task->method()->set_queued_for_compilation(); 390 _queue.push(*task); 391 // FIXME: additional coordination needed? e.g., is it possible for compiler thread to block w/o processing pending tasks? 392 if (is_empty()) { 393 MutexLocker ml(_lock); 394 _lock->notify_all(); 395 } 396 } 397 398 static bool process_pending(CompileTask* task) { 399 // guarantee(task->method()->queued_for_compilation(), ""); 400 if (task->is_unloaded()) { 401 return true; // unloaded 402 } 403 task->method()->set_queued_for_compilation(); // FIXME 404 if (task->method()->pending_queue_processed()) { 405 return true; // already queued 406 } 407 // Mark the method as being in the compile queue. 408 task->method()->set_pending_queue_processed(); 409 if (CompileBroker::compilation_is_complete(task->method(), task->osr_bci(), task->comp_level(), 410 task->requires_online_compilation(), task->compile_reason())) { 411 return true; // already compiled 412 } 413 return false; // active 414 } 415 416 void CompileQueue::transfer_pending() { 417 assert(_lock->owned_by_self(), "must own lock"); 418 419 CompileTask* task; 420 while ((task = _queue.pop()) != nullptr) { 421 bool is_stale = process_pending(task); 422 if (is_stale) { 423 task->set_next(_first_stale); 424 task->set_prev(nullptr); 425 _first_stale = task; 426 } else { 427 add(task); 428 } 429 } 430 } 431 432 /** 433 * Empties compilation queue by deleting all compilation tasks. 434 * Furthermore, the method wakes up all threads that are waiting 435 * on a compilation task to finish. This can happen if background 436 * compilation is disabled. 437 */ 438 void CompileQueue::delete_all() { 439 MutexLocker mu(_lock); 440 transfer_pending(); 441 442 CompileTask* current = _first; 443 444 // Iterate over all tasks in the compile queue 445 while (current != nullptr) { 446 CompileTask* next = current->next(); 447 if (!current->is_blocking()) { 448 // Non-blocking task. 
No one is waiting for it, delete it now. 449 delete current; 450 } else { 451 // Blocking task. By convention, it is the waiters responsibility 452 // to delete the task. We cannot delete it here, because we do not 453 // coordinate with waiters. We will notify the waiters later. 454 } 455 current = next; 456 } 457 _first = nullptr; 458 _last = nullptr; 459 460 // Wake up all blocking task waiters to deal with remaining blocking 461 // tasks. This is not a performance sensitive path, so we do this 462 // unconditionally to simplify coding/testing. 463 { 464 MonitorLocker ml(Thread::current(), CompileTaskWait_lock); 465 ml.notify_all(); 466 } 467 468 // Wake up all threads that block on the queue. 469 _lock->notify_all(); 470 } 471 472 /** 473 * Get the next CompileTask from a CompileQueue 474 */ 475 CompileTask* CompileQueue::get(CompilerThread* thread) { 476 // save methods from RedefineClasses across safepoint 477 // across compile queue lock below. 478 methodHandle save_method; 479 480 MonitorLocker locker(_lock); 481 transfer_pending(); 482 483 RecompilationPolicy::sample_load_average(); 484 485 // If _first is null we have no more compile jobs. There are two reasons for 486 // having no compile jobs: First, we compiled everything we wanted. Second, 487 // we ran out of code cache so compilation has been disabled. In the latter 488 // case we perform code cache sweeps to free memory such that we can re-enable 489 // compilation. 490 while (_first == nullptr) { 491 // Exit loop if compilation is disabled forever 492 if (CompileBroker::is_compilation_disabled_forever()) { 493 return nullptr; 494 } 495 496 AbstractCompiler* compiler = thread->compiler(); 497 guarantee(compiler != nullptr, "Compiler object must exist"); 498 compiler->on_empty_queue(this, thread); 499 if (_first != nullptr) { 500 // The call to on_empty_queue may have temporarily unlocked the MCQ lock 501 // so check again whether any tasks were added to the queue. 502 break; 503 } 504 505 // If we have added stale tasks, there might be waiters that want 506 // the notification these tasks have failed. Normally, this would 507 // be done by a compiler thread that would perform the purge at 508 // the end of some compilation. But, if compile queue is empty, 509 // there is no guarantee compilers would run and do the purge. 510 // Do the purge here and now to unblock the waiters. 511 // Perform this until we run out of stale tasks. 512 while (_first_stale != nullptr) { 513 purge_stale_tasks(); 514 } 515 if (_first != nullptr) { 516 // Purge stale tasks may have transferred some new tasks, 517 // so check again. 518 break; 519 } 520 521 // If there are no compilation tasks and we can compile new jobs 522 // (i.e., there is enough free space in the code cache) there is 523 // no need to invoke the GC. 524 // We need a timed wait here, since compiler threads can exit if compilation 525 // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads 526 // is not critical and we do not want idle compiler threads to wake up too often. 527 locker.wait(5*1000); 528 529 transfer_pending(); // reacquired lock 530 531 if (RecompilationPolicy::have_recompilation_work()) return nullptr; 532 533 if (UseDynamicNumberOfCompilerThreads && _first == nullptr) { 534 // Still nothing to compile. Give caller a chance to stop this thread. 
      if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr;
    }
  }

  if (CompileBroker::is_compilation_disabled_forever()) {
    return nullptr;
  }

  CompileTask* task;
  {
    NoSafepointVerifier nsv;
    task = CompilationPolicy::select_task(this, thread);
    if (task != nullptr) {
      task = task->select_for_compilation();
    }
  }

  if (task != nullptr) {
    // Save method pointers across unlock safepoint. The task is removed from
    // the compilation queue, which is walked during RedefineClasses.
    Thread* thread = Thread::current();
    save_method = methodHandle(thread, task->method());

    remove(task);
  }
  purge_stale_tasks(); // may temporarily release MCQ lock
  return task;
}

// Clean & deallocate stale compile tasks.
// Temporarily releases MethodCompileQueue lock.
void CompileQueue::purge_stale_tasks() {
  assert(_lock->owned_by_self(), "must own lock");
  if (_first_stale != nullptr) {
    // Stale tasks are purged when MCQ lock is released,
    // but _first_stale updates are protected by MCQ lock.
    // Once task processing starts and MCQ lock is released,
    // other compiler threads can reuse _first_stale.
    CompileTask* head = _first_stale;
    _first_stale = nullptr;
    {
      MutexUnlocker ul(_lock);
      for (CompileTask* task = head; task != nullptr; ) {
        CompileTask* next_task = task->next();
        task->set_next(nullptr);
        CompileTaskWrapper ctw(task); // Frees the task
        task->set_failure_reason("stale task");
        task = next_task;
      }
    }
    transfer_pending(); // transfer pending after reacquiring MCQ lock
  }
}

void CompileQueue::remove(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  if (task->prev() != nullptr) {
    task->prev()->set_next(task->next());
  } else {
    // task is the first element
    assert(task == _first, "Sanity");
    _first = task->next();
  }

  if (task->next() != nullptr) {
    task->next()->set_prev(task->prev());
  } else {
    // task is the last element
    assert(task == _last, "Sanity");
    _last = task->prev();
  }
  task->set_next(nullptr);
  task->set_prev(nullptr);
  --_size;
  ++_total_removed;
}

void CompileQueue::remove_and_mark_stale(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  remove(task);

  // Enqueue the task for reclamation (should be done outside MCQ lock)
  task->set_next(_first_stale);
  task->set_prev(nullptr);
  _first_stale = task;
}

// Methods in the compile queue need to be marked as used on the stack
// so that they don't get reclaimed by RedefineClasses.
void CompileQueue::mark_on_stack() {
  for (CompileTask* task = _first; task != nullptr; task = task->next()) {
    task->mark_on_stack();
  }
  for (CompileTask* task = _queue.first(); !_queue.is_end(task); task = task->next()) {
    assert(task != nullptr, "");
    task->mark_on_stack();
  }
}


CompileQueue* CompileBroker::compile_queue(int comp_level, bool is_aot) {
  if (is_c2_compile(comp_level)) return ((is_aot && (_ac_count > 0)) ? _ac2_compile_queue : _c2_compile_queue);
  if (is_c1_compile(comp_level)) return ((is_aot && (_ac_count > 0)) ?
_ac1_compile_queue : _c1_compile_queue); 638 return nullptr; 639 } 640 641 CompileQueue* CompileBroker::c1_compile_queue() { 642 return _c1_compile_queue; 643 } 644 645 CompileQueue* CompileBroker::c2_compile_queue() { 646 return _c2_compile_queue; 647 } 648 649 void CompileBroker::print_compile_queues(outputStream* st) { 650 st->print_cr("Current compiles: "); 651 652 char buf[2000]; 653 int buflen = sizeof(buf); 654 Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true); 655 656 st->cr(); 657 if (_c1_compile_queue != nullptr) { 658 _c1_compile_queue->print(st); 659 } 660 if (_c2_compile_queue != nullptr) { 661 _c2_compile_queue->print(st); 662 } 663 if (_ac1_compile_queue != nullptr) { 664 _ac1_compile_queue->print(st); 665 } 666 if (_ac2_compile_queue != nullptr) { 667 _ac2_compile_queue->print(st); 668 } 669 } 670 671 void CompileQueue::print(outputStream* st) { 672 assert_locked_or_safepoint(_lock); 673 st->print_cr("%s:", name()); 674 CompileTask* task = _first; 675 if (task == nullptr) { 676 st->print_cr("Empty"); 677 } else { 678 while (task != nullptr) { 679 task->print(st, nullptr, true, true); 680 task = task->next(); 681 } 682 } 683 st->cr(); 684 } 685 686 void CompileQueue::print_tty() { 687 stringStream ss; 688 // Dump the compile queue into a buffer before locking the tty 689 print(&ss); 690 { 691 ttyLocker ttyl; 692 tty->print("%s", ss.freeze()); 693 } 694 } 695 696 CompilerCounters::CompilerCounters() { 697 _current_method[0] = '\0'; 698 _compile_type = CompileBroker::no_compile; 699 } 700 701 #if INCLUDE_JFR && COMPILER2_OR_JVMCI 702 // It appends new compiler phase names to growable array phase_names(a new CompilerPhaseType mapping 703 // in compiler/compilerEvent.cpp) and registers it with its serializer. 704 // 705 // c2 uses explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp, 706 // so if c2 is used, it should be always registered first. 707 // This function is called during vm initialization. 708 static void register_jfr_phasetype_serializer(CompilerType compiler_type) { 709 ResourceMark rm; 710 static bool first_registration = true; 711 if (compiler_type == compiler_jvmci) { 712 CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false); 713 first_registration = false; 714 #ifdef COMPILER2 715 } else if (compiler_type == compiler_c2) { 716 assert(first_registration, "invariant"); // c2 must be registered first. 717 for (int i = 0; i < PHASE_NUM_TYPES; i++) { 718 const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i); 719 CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false); 720 } 721 first_registration = false; 722 #endif // COMPILER2 723 } 724 } 725 #endif // INCLUDE_JFR && COMPILER2_OR_JVMCI 726 727 // ------------------------------------------------------------------ 728 // CompileBroker::compilation_init 729 // 730 // Initialize the Compilation object 731 void CompileBroker::compilation_init(JavaThread* THREAD) { 732 // No need to initialize compilation system if we do not use it. 733 if (!UseCompiler) { 734 return; 735 } 736 // Set the interface to the current compiler(s). 737 _c1_count = CompilationPolicy::c1_count(); 738 _c2_count = CompilationPolicy::c2_count(); 739 _ac_count = CompilationPolicy::ac_count(); 740 741 #if INCLUDE_JVMCI 742 if (EnableJVMCI) { 743 // This is creating a JVMCICompiler singleton. 
744 JVMCICompiler* jvmci = new JVMCICompiler(); 745 746 if (UseJVMCICompiler) { 747 _compilers[1] = jvmci; 748 if (FLAG_IS_DEFAULT(JVMCIThreads)) { 749 if (BootstrapJVMCI) { 750 // JVMCI will bootstrap so give it more threads 751 _c2_count = MIN2(32, os::active_processor_count()); 752 } 753 } else { 754 _c2_count = JVMCIThreads; 755 } 756 if (FLAG_IS_DEFAULT(JVMCIHostThreads)) { 757 } else { 758 #ifdef COMPILER1 759 _c1_count = JVMCIHostThreads; 760 #endif // COMPILER1 761 } 762 } 763 } 764 #endif // INCLUDE_JVMCI 765 766 #ifdef COMPILER1 767 if (_c1_count > 0) { 768 _compilers[0] = new Compiler(); 769 } 770 #endif // COMPILER1 771 772 #ifdef COMPILER2 773 if (true JVMCI_ONLY( && !UseJVMCICompiler)) { 774 if (_c2_count > 0) { 775 _compilers[1] = new C2Compiler(); 776 // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit. 777 // idToPhase mapping for c2 is in opto/phasetype.hpp 778 JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);) 779 } 780 } 781 #endif // COMPILER2 782 783 #if INCLUDE_JVMCI 784 // Register after c2 registration. 785 // JVMCI CompilerPhaseType idToPhase mapping is dynamic. 786 if (EnableJVMCI) { 787 JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);) 788 } 789 #endif // INCLUDE_JVMCI 790 791 if (CompilerOracle::should_collect_memstat()) { 792 CompilationMemoryStatistic::initialize(); 793 } 794 795 // Start the compiler thread(s) 796 init_compiler_threads(); 797 // totalTime performance counter is always created as it is required 798 // by the implementation of java.lang.management.CompilationMXBean. 799 { 800 // Ensure OOM leads to vm_exit_during_initialization. 801 EXCEPTION_MARK; 802 _perf_total_compilation = 803 PerfDataManager::create_counter(JAVA_CI, "totalTime", 804 PerfData::U_Ticks, CHECK); 805 } 806 807 if (UsePerfData) { 808 809 EXCEPTION_MARK; 810 811 // create the jvmstat performance counters 812 _perf_osr_compilation = 813 PerfDataManager::create_counter(SUN_CI, "osrTime", 814 PerfData::U_Ticks, CHECK); 815 816 _perf_standard_compilation = 817 PerfDataManager::create_counter(SUN_CI, "standardTime", 818 PerfData::U_Ticks, CHECK); 819 820 _perf_total_bailout_count = 821 PerfDataManager::create_counter(SUN_CI, "totalBailouts", 822 PerfData::U_Events, CHECK); 823 824 _perf_total_invalidated_count = 825 PerfDataManager::create_counter(SUN_CI, "totalInvalidates", 826 PerfData::U_Events, CHECK); 827 828 _perf_total_compile_count = 829 PerfDataManager::create_counter(SUN_CI, "totalCompiles", 830 PerfData::U_Events, CHECK); 831 _perf_total_osr_compile_count = 832 PerfDataManager::create_counter(SUN_CI, "osrCompiles", 833 PerfData::U_Events, CHECK); 834 835 _perf_total_standard_compile_count = 836 PerfDataManager::create_counter(SUN_CI, "standardCompiles", 837 PerfData::U_Events, CHECK); 838 839 _perf_sum_osr_bytes_compiled = 840 PerfDataManager::create_counter(SUN_CI, "osrBytes", 841 PerfData::U_Bytes, CHECK); 842 843 _perf_sum_standard_bytes_compiled = 844 PerfDataManager::create_counter(SUN_CI, "standardBytes", 845 PerfData::U_Bytes, CHECK); 846 847 _perf_sum_nmethod_size = 848 PerfDataManager::create_counter(SUN_CI, "nmethodSize", 849 PerfData::U_Bytes, CHECK); 850 851 _perf_sum_nmethod_code_size = 852 PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize", 853 PerfData::U_Bytes, CHECK); 854 855 _perf_last_method = 856 PerfDataManager::create_string_variable(SUN_CI, "lastMethod", 857 CompilerCounters::cmname_buffer_length, 858 "", CHECK); 859 860 _perf_last_failed_method = 861 PerfDataManager::create_string_variable(SUN_CI, 
"lastFailedMethod", 862 CompilerCounters::cmname_buffer_length, 863 "", CHECK); 864 865 _perf_last_invalidated_method = 866 PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod", 867 CompilerCounters::cmname_buffer_length, 868 "", CHECK); 869 870 _perf_last_compile_type = 871 PerfDataManager::create_variable(SUN_CI, "lastType", 872 PerfData::U_None, 873 (jlong)CompileBroker::no_compile, 874 CHECK); 875 876 _perf_last_compile_size = 877 PerfDataManager::create_variable(SUN_CI, "lastSize", 878 PerfData::U_Bytes, 879 (jlong)CompileBroker::no_compile, 880 CHECK); 881 882 883 _perf_last_failed_type = 884 PerfDataManager::create_variable(SUN_CI, "lastFailedType", 885 PerfData::U_None, 886 (jlong)CompileBroker::no_compile, 887 CHECK); 888 889 _perf_last_invalidated_type = 890 PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType", 891 PerfData::U_None, 892 (jlong)CompileBroker::no_compile, 893 CHECK); 894 } 895 896 log_info(aot, codecache, init)("CompileBroker is initialized"); 897 _initialized = true; 898 } 899 900 Handle CompileBroker::create_thread_oop(const char* name, TRAPS) { 901 Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK_NH); 902 return thread_oop; 903 } 904 905 void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) { 906 CompilationPolicy::replay_training_at_init_loop(thread); 907 } 908 909 #if defined(ASSERT) && COMPILER2_OR_JVMCI 910 // Entry for DeoptimizeObjectsALotThread. The threads are started in 911 // CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled 912 void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) { 913 DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread); 914 bool enter_single_loop; 915 { 916 MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag); 917 static int single_thread_count = 0; 918 enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle; 919 } 920 if (enter_single_loop) { 921 dt->deoptimize_objects_alot_loop_single(); 922 } else { 923 dt->deoptimize_objects_alot_loop_all(); 924 } 925 } 926 927 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each 928 // barrier targets a single thread which is selected round robin. 929 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() { 930 HandleMark hm(this); 931 while (true) { 932 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) { 933 { // Begin new scope for escape barrier 934 HandleMarkCleaner hmc(this); 935 ResourceMark rm(this); 936 EscapeBarrier eb(true, this, deoptee_thread); 937 eb.deoptimize_objects(100); 938 } 939 // Now sleep after the escape barriers destructor resumed deoptee_thread. 940 sleep(DeoptimizeObjectsALotInterval); 941 } 942 } 943 } 944 945 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each 946 // barrier targets all java threads in the vm at once. 947 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() { 948 HandleMark hm(this); 949 while (true) { 950 { // Begin new scope for escape barrier 951 HandleMarkCleaner hmc(this); 952 ResourceMark rm(this); 953 EscapeBarrier eb(true, this); 954 eb.deoptimize_objects_all_threads(); 955 } 956 // Now sleep after the escape barriers destructor resumed the java threads. 
957 sleep(DeoptimizeObjectsALotInterval); 958 } 959 } 960 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI 961 962 963 JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) { 964 Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle)); 965 966 if (java_lang_Thread::thread(thread_oop()) != nullptr) { 967 assert(type == compiler_t, "should only happen with reused compiler threads"); 968 // The compiler thread hasn't actually exited yet so don't try to reuse it 969 return nullptr; 970 } 971 972 JavaThread* new_thread = nullptr; 973 switch (type) { 974 case compiler_t: 975 assert(comp != nullptr, "Compiler instance missing."); 976 if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) { 977 CompilerCounters* counters = new CompilerCounters(); 978 new_thread = new CompilerThread(queue, counters); 979 } 980 break; 981 #if defined(ASSERT) && COMPILER2_OR_JVMCI 982 case deoptimizer_t: 983 new_thread = new DeoptimizeObjectsALotThread(); 984 break; 985 #endif // ASSERT 986 case training_replay_t: 987 new_thread = new TrainingReplayThread(); 988 break; 989 default: 990 ShouldNotReachHere(); 991 } 992 993 // At this point the new CompilerThread data-races with this startup 994 // thread (which is the main thread and NOT the VM thread). 995 // This means Java bytecodes being executed at startup can 996 // queue compile jobs which will run at whatever default priority the 997 // newly created CompilerThread runs at. 998 999 1000 // At this point it may be possible that no osthread was created for the 1001 // JavaThread due to lack of resources. We will handle that failure below. 1002 // Also check new_thread so that static analysis is happy. 1003 if (new_thread != nullptr && new_thread->osthread() != nullptr) { 1004 1005 if (type == compiler_t) { 1006 CompilerThread::cast(new_thread)->set_compiler(comp); 1007 } 1008 1009 // Note that we cannot call os::set_priority because it expects Java 1010 // priorities and we are *explicitly* using OS priorities so that it's 1011 // possible to set the compiler thread priority higher than any Java 1012 // thread. 1013 1014 int native_prio = CompilerThreadPriority; 1015 if (native_prio == -1) { 1016 if (UseCriticalCompilerThreadPriority) { 1017 native_prio = os::java_to_os_priority[CriticalPriority]; 1018 } else { 1019 native_prio = os::java_to_os_priority[NearMaxPriority]; 1020 } 1021 } 1022 os::set_native_priority(new_thread, native_prio); 1023 1024 // Note that this only sets the JavaThread _priority field, which by 1025 // definition is limited to Java priorities and not OS priorities. 1026 JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority); 1027 1028 } else { // osthread initialization failure 1029 if (UseDynamicNumberOfCompilerThreads && type == compiler_t 1030 && comp->num_compiler_threads() > 0) { 1031 // The new thread is not known to Thread-SMR yet so we can just delete. 
1032 delete new_thread; 1033 return nullptr; 1034 } else { 1035 vm_exit_during_initialization("java.lang.OutOfMemoryError", 1036 os::native_thread_creation_failed_msg()); 1037 } 1038 } 1039 1040 os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS) 1041 1042 return new_thread; 1043 } 1044 1045 static bool trace_compiler_threads() { 1046 LogTarget(Debug, jit, thread) lt; 1047 return TraceCompilerThreads || lt.is_enabled(); 1048 } 1049 1050 static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) { 1051 char name_buffer[256]; 1052 os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i); 1053 Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL); 1054 return JNIHandles::make_global(thread_oop); 1055 } 1056 1057 static void print_compiler_threads(stringStream& msg) { 1058 if (TraceCompilerThreads) { 1059 tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string()); 1060 } 1061 LogTarget(Debug, jit, thread) lt; 1062 if (lt.is_enabled()) { 1063 LogStream ls(lt); 1064 ls.print_cr("%s", msg.as_string()); 1065 } 1066 } 1067 1068 static void print_compiler_thread(JavaThread *ct) { 1069 if (trace_compiler_threads()) { 1070 ResourceMark rm; 1071 ThreadsListHandle tlh; // name() depends on the TLH. 1072 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); 1073 stringStream msg; 1074 msg.print("Added initial compiler thread %s", ct->name()); 1075 print_compiler_threads(msg); 1076 } 1077 } 1078 1079 void CompileBroker::init_compiler_threads() { 1080 // Ensure any exceptions lead to vm_exit_during_initialization. 1081 EXCEPTION_MARK; 1082 #if !defined(ZERO) 1083 assert(_c2_count > 0 || _c1_count > 0, "No compilers?"); 1084 #endif // !ZERO 1085 // Initialize the compilation queue 1086 if (_c2_count > 0) { 1087 const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue"; 1088 _c2_compile_queue = new CompileQueue(name, MethodCompileQueueC2_lock); 1089 _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler); 1090 _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler); 1091 } 1092 if (_c1_count > 0) { 1093 _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueueC1_lock); 1094 _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler); 1095 _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler); 1096 } 1097 1098 if (_ac_count > 0) { 1099 if (_c1_count > 0) { // C1 is present 1100 _ac1_compile_queue = new CompileQueue("C1 AOT code compile queue", MethodCompileQueueSC1_lock); 1101 } 1102 if (_c2_count > 0) { // C2 is present 1103 _ac2_compile_queue = new CompileQueue("C2 AOT code compile queue", MethodCompileQueueSC2_lock); 1104 } 1105 _ac_objects = NEW_C_HEAP_ARRAY(jobject, _ac_count, mtCompiler); 1106 _ac_logs = NEW_C_HEAP_ARRAY(CompileLog*, _ac_count, mtCompiler); 1107 } 1108 char name_buffer[256]; 1109 1110 for (int i = 0; i < _c2_count; i++) { 1111 // Create a name for our thread. 
1112 jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK); 1113 _compiler2_objects[i] = thread_handle; 1114 _compiler2_logs[i] = nullptr; 1115 1116 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1117 JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD); 1118 assert(ct != nullptr, "should have been handled for initial thread"); 1119 _compilers[1]->set_num_compiler_threads(i + 1); 1120 print_compiler_thread(ct); 1121 } 1122 } 1123 1124 for (int i = 0; i < _c1_count; i++) { 1125 // Create a name for our thread. 1126 jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK); 1127 _compiler1_objects[i] = thread_handle; 1128 _compiler1_logs[i] = nullptr; 1129 1130 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1131 JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD); 1132 assert(ct != nullptr, "should have been handled for initial thread"); 1133 _compilers[0]->set_num_compiler_threads(i + 1); 1134 print_compiler_thread(ct); 1135 } 1136 } 1137 1138 if (_ac_count > 0) { 1139 int i = 0; 1140 if (_c1_count > 0) { // C1 is present 1141 os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 1); 1142 Handle thread_oop = create_thread_oop(name_buffer, CHECK); 1143 jobject thread_handle = JNIHandles::make_global(thread_oop); 1144 _ac_objects[i] = thread_handle; 1145 _ac_logs[i] = nullptr; 1146 i++; 1147 1148 JavaThread *ct = make_thread(compiler_t, thread_handle, _ac1_compile_queue, _compilers[0], THREAD); 1149 assert(ct != nullptr, "should have been handled for initial thread"); 1150 print_compiler_thread(ct); 1151 } 1152 if (_c2_count > 0) { // C2 is present 1153 os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 2); 1154 Handle thread_oop = create_thread_oop(name_buffer, CHECK); 1155 jobject thread_handle = JNIHandles::make_global(thread_oop); 1156 _ac_objects[i] = thread_handle; 1157 _ac_logs[i] = nullptr; 1158 1159 JavaThread *ct = make_thread(compiler_t, thread_handle, _ac2_compile_queue, _compilers[1], THREAD); 1160 assert(ct != nullptr, "should have been handled for initial thread"); 1161 print_compiler_thread(ct); 1162 } 1163 } 1164 1165 if (UsePerfData) { 1166 PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK); 1167 } 1168 1169 #if defined(ASSERT) && COMPILER2_OR_JVMCI 1170 if (DeoptimizeObjectsALot) { 1171 // Initialize and start the object deoptimizer threads 1172 const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll; 1173 for (int count = 0; count < total_count; count++) { 1174 Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK); 1175 jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop()); 1176 make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD); 1177 } 1178 } 1179 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI 1180 } 1181 1182 void CompileBroker::init_training_replay() { 1183 // Ensure any exceptions lead to vm_exit_during_initialization. 
1184 EXCEPTION_MARK; 1185 if (TrainingData::have_data()) { 1186 Handle thread_oop = create_thread_oop("Training replay thread", CHECK); 1187 jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop()); 1188 make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD); 1189 } 1190 } 1191 1192 void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { 1193 1194 int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0; 1195 const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4; 1196 1197 // Quick check if we already have enough compiler threads without taking the lock. 1198 // Numbers may change concurrently, so we read them again after we have the lock. 1199 if (_c2_compile_queue != nullptr) { 1200 old_c2_count = get_c2_thread_count(); 1201 new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread); 1202 } 1203 if (_c1_compile_queue != nullptr) { 1204 old_c1_count = get_c1_thread_count(); 1205 new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread); 1206 } 1207 if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return; 1208 1209 // Now, we do the more expensive operations. 1210 size_t free_memory = 0; 1211 // Return value ignored - defaulting to 0 on failure. 1212 (void)os::free_memory(free_memory); 1213 // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All). 1214 size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled), 1215 available_cc_p = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled); 1216 1217 // Only attempt to start additional threads if the lock is free. 1218 if (!CompileThread_lock->try_lock()) return; 1219 1220 if (_c2_compile_queue != nullptr) { 1221 old_c2_count = get_c2_thread_count(); 1222 new_c2_count = MIN4(_c2_count, 1223 _c2_compile_queue->size() / c2_tasks_per_thread, 1224 (int)(free_memory / (200*M)), 1225 (int)(available_cc_np / (128*K))); 1226 1227 for (int i = old_c2_count; i < new_c2_count; i++) { 1228 #if INCLUDE_JVMCI 1229 if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) { 1230 // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their 1231 // existence is completely hidden from the rest of the VM (and those compiler threads can't 1232 // call Java code to do the creation anyway). 1233 // 1234 // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we 1235 // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads. For 1236 // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary 1237 // coupling with Java. 1238 if (!THREAD->can_call_java()) break; 1239 char name_buffer[256]; 1240 os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i); 1241 Handle thread_oop; 1242 { 1243 // We have to give up the lock temporarily for the Java calls. 1244 MutexUnlocker mu(CompileThread_lock); 1245 thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD); 1246 } 1247 if (HAS_PENDING_EXCEPTION) { 1248 if (trace_compiler_threads()) { 1249 ResourceMark rm; 1250 stringStream msg; 1251 msg.print_cr("JVMCI compiler thread creation failed:"); 1252 PENDING_EXCEPTION->print_on(&msg); 1253 print_compiler_threads(msg); 1254 } 1255 CLEAR_PENDING_EXCEPTION; 1256 break; 1257 } 1258 // Check if another thread has beaten us during the Java calls. 
1259 if (get_c2_thread_count() != i) break; 1260 jobject thread_handle = JNIHandles::make_global(thread_oop); 1261 assert(compiler2_object(i) == nullptr, "Old one must be released!"); 1262 _compiler2_objects[i] = thread_handle; 1263 } 1264 #endif 1265 guarantee(compiler2_object(i) != nullptr, "Thread oop must exist"); 1266 JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD); 1267 if (ct == nullptr) break; 1268 _compilers[1]->set_num_compiler_threads(i + 1); 1269 if (trace_compiler_threads()) { 1270 ResourceMark rm; 1271 ThreadsListHandle tlh; // name() depends on the TLH. 1272 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); 1273 stringStream msg; 1274 msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)", 1275 ct->name(), (int)(free_memory/M), (int)(available_cc_np/M)); 1276 print_compiler_threads(msg); 1277 } 1278 } 1279 } 1280 1281 if (_c1_compile_queue != nullptr) { 1282 old_c1_count = get_c1_thread_count(); 1283 new_c1_count = MIN4(_c1_count, 1284 _c1_compile_queue->size() / c1_tasks_per_thread, 1285 (int)(free_memory / (100*M)), 1286 (int)(available_cc_p / (128*K))); 1287 1288 for (int i = old_c1_count; i < new_c1_count; i++) { 1289 JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD); 1290 if (ct == nullptr) break; 1291 _compilers[0]->set_num_compiler_threads(i + 1); 1292 if (trace_compiler_threads()) { 1293 ResourceMark rm; 1294 ThreadsListHandle tlh; // name() depends on the TLH. 1295 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); 1296 stringStream msg; 1297 msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)", 1298 ct->name(), (int)(free_memory/M), (int)(available_cc_p/M)); 1299 print_compiler_threads(msg); 1300 } 1301 } 1302 } 1303 1304 CompileThread_lock->unlock(); 1305 } 1306 1307 1308 /** 1309 * Set the methods on the stack as on_stack so that redefine classes doesn't 1310 * reclaim them. This method is executed at a safepoint. 1311 */ 1312 void CompileBroker::mark_on_stack() { 1313 assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); 1314 // Since we are at a safepoint, we do not need a lock to access 1315 // the compile queues. 1316 if (_c2_compile_queue != nullptr) { 1317 _c2_compile_queue->mark_on_stack(); 1318 } 1319 if (_c1_compile_queue != nullptr) { 1320 _c1_compile_queue->mark_on_stack(); 1321 } 1322 if (_ac1_compile_queue != nullptr) { 1323 _ac1_compile_queue->mark_on_stack(); 1324 } 1325 if (_ac2_compile_queue != nullptr) { 1326 _ac2_compile_queue->mark_on_stack(); 1327 } 1328 } 1329 1330 // ------------------------------------------------------------------ 1331 // CompileBroker::compile_method 1332 // 1333 // Request compilation of a method. 
1334 void CompileBroker::compile_method_base(const methodHandle& method, 1335 int osr_bci, 1336 int comp_level, 1337 int hot_count, 1338 CompileTask::CompileReason compile_reason, 1339 bool requires_online_compilation, 1340 bool blocking, 1341 Thread* thread) { 1342 guarantee(!method->is_abstract(), "cannot compile abstract methods"); 1343 assert(method->method_holder()->is_instance_klass(), 1344 "sanity check"); 1345 assert(!method->method_holder()->is_not_initialized() || 1346 compile_reason == CompileTask::Reason_Preload || 1347 compile_reason == CompileTask::Reason_Precompile || 1348 compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized"); 1349 assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys"); 1350 1351 if (CIPrintRequests) { 1352 tty->print("request: "); 1353 method->print_short_name(tty); 1354 if (osr_bci != InvocationEntryBci) { 1355 tty->print(" osr_bci: %d", osr_bci); 1356 } 1357 tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count); 1358 if (hot_count > 0) { 1359 tty->print(" hot: yes"); 1360 } 1361 tty->cr(); 1362 } 1363 1364 // A request has been made for compilation. Before we do any 1365 // real work, check to see if the method has been compiled 1366 // in the meantime with a definitive result. 1367 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) { 1368 return; 1369 } 1370 1371 #ifndef PRODUCT 1372 if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) { 1373 if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) { 1374 // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI. 1375 return; 1376 } 1377 } 1378 #endif 1379 1380 // If this method is already in the compile queue, then 1381 // we do not block the current thread. 1382 if (compilation_is_in_queue(method)) { 1383 // We may want to decay our counter a bit here to prevent 1384 // multiple denied requests for compilation. This is an 1385 // open compilation policy issue. Note: The other possibility, 1386 // in the case that this is a blocking compile request, is to have 1387 // all subsequent blocking requesters wait for completion of 1388 // ongoing compiles. Note that in this case we'll need a protocol 1389 // for freeing the associated compile tasks. [Or we could have 1390 // a single static monitor on which all these waiters sleep.] 1391 return; 1392 } 1393 1394 // Tiered policy requires MethodCounters to exist before adding a method to 1395 // the queue. Create if we don't have them yet. 1396 if (compile_reason != CompileTask::Reason_Preload) { 1397 method->get_method_counters(thread); 1398 } 1399 1400 AOTCodeEntry* aot_code_entry = find_aot_code_entry(method, osr_bci, comp_level, compile_reason, requires_online_compilation); 1401 bool is_aot = (aot_code_entry != nullptr); 1402 1403 // Outputs from the following MutexLocker block: 1404 CompileTask* task = nullptr; 1405 CompileQueue* queue = compile_queue(comp_level, is_aot); 1406 1407 // Acquire our lock. 1408 { 1409 ConditionalMutexLocker locker(thread, queue->lock(), !UseLockFreeCompileQueues); 1410 1411 // Make sure the method has not slipped into the queues since 1412 // last we checked; note that those checks were "fast bail-outs". 1413 // Here we need to be more careful, see 14012000 below. 
1414 if (compilation_is_in_queue(method)) { 1415 return; 1416 } 1417 1418 // We need to check again to see if the compilation has 1419 // completed. A previous compilation may have registered 1420 // some result. 1421 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) { 1422 return; 1423 } 1424 1425 // We now know that this compilation is not pending, complete, 1426 // or prohibited. Assign a compile_id to this compilation 1427 // and check to see if it is in our [Start..Stop) range. 1428 int compile_id = assign_compile_id(method, osr_bci); 1429 if (compile_id == 0) { 1430 // The compilation falls outside the allowed range. 1431 return; 1432 } 1433 1434 #if INCLUDE_JVMCI 1435 if (UseJVMCICompiler && blocking) { 1436 // Don't allow blocking compiles for requests triggered by JVMCI. 1437 if (thread->is_Compiler_thread()) { 1438 blocking = false; 1439 } 1440 1441 // In libjvmci, JVMCI initialization should not deadlock with other threads 1442 if (!UseJVMCINativeLibrary) { 1443 // Don't allow blocking compiles if inside a class initializer or while performing class loading 1444 vframeStream vfst(JavaThread::cast(thread)); 1445 for (; !vfst.at_end(); vfst.next()) { 1446 if (vfst.method()->is_static_initializer() || 1447 (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) && 1448 vfst.method()->name() == vmSymbols::loadClass_name())) { 1449 blocking = false; 1450 break; 1451 } 1452 } 1453 1454 // Don't allow blocking compilation requests to JVMCI 1455 // if JVMCI itself is not yet initialized 1456 if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) { 1457 blocking = false; 1458 } 1459 } 1460 1461 // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown 1462 // to avoid deadlock between compiler thread(s) and threads run at shutdown 1463 // such as the DestroyJavaVM thread. 1464 if (JVMCI::in_shutdown()) { 1465 blocking = false; 1466 } 1467 } 1468 #endif // INCLUDE_JVMCI 1469 1470 // We will enter the compilation in the queue. 1471 // 14012000: Note that this sets the queued_for_compile bits in 1472 // the target method. We can now reason that a method cannot be 1473 // queued for compilation more than once, as follows: 1474 // Before a thread queues a task for compilation, it first acquires 1475 // the compile queue lock, then checks if the method's queued bits 1476 // are set or it has already been compiled. Thus there can not be two 1477 // instances of a compilation task for the same method on the 1478 // compilation queue. Consider now the case where the compilation 1479 // thread has already removed a task for that method from the queue 1480 // and is in the midst of compiling it. In this case, the 1481 // queued_for_compile bits must be set in the method (and these 1482 // will be visible to the current thread, since the bits were set 1483 // under protection of the compile queue lock, which we hold now. 1484 // When the compilation completes, the compiler thread first sets 1485 // the compilation result and then clears the queued_for_compile 1486 // bits. Neither of these actions are protected by a barrier (or done 1487 // under the protection of a lock), so the only guarantee we have 1488 // (on machines with TSO (Total Store Order)) is that these values 1489 // will update in that order. 
As a result, the only combinations of 1490 // these bits that the current thread will see are, in temporal order: 1491 // <RESULT, QUEUE> : 1492 // <0, 1> : in compile queue, but not yet compiled 1493 // <1, 1> : compiled but queue bit not cleared 1494 // <1, 0> : compiled and queue bit cleared 1495 // Because we first check the queue bits then check the result bits, 1496 // we are assured that we cannot introduce a duplicate task. 1497 // Note that if we did the tests in the reverse order (i.e. check 1498 // result then check queued bit), we could get the result bit before 1499 // the compilation completed, and the queue bit after the compilation 1500 // completed, and end up introducing a "duplicate" (redundant) task. 1501 // In that case, the compiler thread should first check if a method 1502 // has already been compiled before trying to compile it. 1503 // NOTE: in the event that there are multiple compiler threads and 1504 // there is de-optimization/recompilation, things will get hairy, 1505 // and in that case it's best to protect both the testing (here) of 1506 // these bits, and their updating (here and elsewhere) under a 1507 // common lock. 1508 task = create_compile_task(queue, 1509 compile_id, method, 1510 osr_bci, comp_level, 1511 hot_count, aot_code_entry, compile_reason, 1512 requires_online_compilation, blocking); 1513 1514 if (task->is_aot_load() && (_ac_count > 0)) { 1515 // Put it on AOT code caching queue 1516 queue = is_c1_compile(comp_level) ? _ac1_compile_queue : _ac2_compile_queue; 1517 } 1518 1519 if (UseLockFreeCompileQueues) { 1520 assert(queue->lock()->owned_by_self() == false, ""); 1521 queue->add_pending(task); 1522 } else { 1523 queue->add(task); 1524 } 1525 } 1526 1527 if (blocking) { 1528 wait_for_completion(task); 1529 } 1530 } 1531 1532 AOTCodeEntry* CompileBroker::find_aot_code_entry(const methodHandle& method, int osr_bci, int comp_level, 1533 CompileTask::CompileReason compile_reason, 1534 bool requires_online_compilation) { 1535 if (requires_online_compilation || compile_reason == CompileTask::Reason_Whitebox) { 1536 return nullptr; // Need normal JIT compilation 1537 } 1538 AOTCodeEntry* aot_code_entry = nullptr; 1539 if (osr_bci == InvocationEntryBci && AOTCodeCache::is_using_code()) { 1540 // Check for AOT preload code first. 1541 if (compile_reason == CompileTask::Reason_Preload) { 1542 aot_code_entry = method->aot_code_entry(); 1543 assert(aot_code_entry != nullptr && aot_code_entry->for_preload(), "sanity"); 1544 } else { 1545 aot_code_entry = AOTCodeCache::find_code_entry(method, comp_level); 1546 } 1547 } 1548 return aot_code_entry; 1549 } 1550 1551 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1552 int comp_level, 1553 int hot_count, 1554 bool requires_online_compilation, 1555 CompileTask::CompileReason compile_reason, 1556 TRAPS) { 1557 // Do nothing if compilebroker is not initialized or compiles are submitted on level none 1558 if (!_initialized || comp_level == CompLevel_none) { 1559 return nullptr; 1560 } 1561 1562 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 1563 assert(comp != nullptr, "Ensure we have a compiler"); 1564 1565 #if INCLUDE_JVMCI 1566 if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) { 1567 // JVMCI compilation is not yet initializable. 1568 return nullptr; 1569 } 1570 #endif 1571 1572 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp); 1573 // CompileBroker::compile_method can trap and can have pending async exception. 
1574 nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_count, requires_online_compilation, compile_reason, directive, THREAD); 1575 DirectivesStack::release(directive); 1576 return nm; 1577 } 1578 1579 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1580 int comp_level, 1581 int hot_count, 1582 bool requires_online_compilation, 1583 CompileTask::CompileReason compile_reason, 1584 DirectiveSet* directive, 1585 TRAPS) { 1586 1587 // make sure arguments make sense 1588 assert(method->method_holder()->is_instance_klass(), "not an instance method"); 1589 assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); 1590 assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods"); 1591 assert(!method->method_holder()->is_not_initialized() || 1592 compile_reason == CompileTask::Reason_Preload || 1593 compile_reason == CompileTask::Reason_Precompile || 1594 compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized"); 1595 // return quickly if possible 1596 bool aot_compilation = (PrecompileCode && PrecompileOnlyAndExit) || 1597 CDSConfig::is_dumping_aot_code(); 1598 if (aot_compilation && !CompileTask::reason_is_precompile(compile_reason)) { 1599 // Skip normal compilations when compiling AOT code 1600 return nullptr; 1601 } 1602 1603 // lock, make sure that the compilation 1604 // isn't prohibited in a straightforward way. 1605 AbstractCompiler* comp = CompileBroker::compiler(comp_level); 1606 if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) { 1607 return nullptr; 1608 } 1609 1610 if (osr_bci == InvocationEntryBci) { 1611 // standard compilation 1612 nmethod* method_code = method->code(); 1613 if (method_code != nullptr) { 1614 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) { 1615 return method_code; 1616 } 1617 } 1618 if (method->is_not_compilable(comp_level)) { 1619 return nullptr; 1620 } 1621 } else { 1622 // osr compilation 1623 // We accept a higher level osr method 1624 nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false); 1625 if (nm != nullptr) return nm; 1626 if (method->is_not_osr_compilable(comp_level)) return nullptr; 1627 } 1628 1629 assert(!HAS_PENDING_EXCEPTION, "No exception should be present"); 1630 // some prerequisites that are compiler specific 1631 if (compile_reason != CompileTask::Reason_Preload && 1632 !CompileTask::reason_is_precompile(compile_reason) && 1633 (comp->is_c2() || comp->is_jvmci())) { 1634 InternalOOMEMark iom(THREAD); 1635 method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL); 1636 // Resolve all classes seen in the signature of the method 1637 // we are compiling. 1638 Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL); 1639 } 1640 1641 // If the method is native, do the lookup in the thread requesting 1642 // the compilation. Native lookups can load code, which is not 1643 // permitted during compilation. 1644 // 1645 // Note: A native method implies non-osr compilation which is 1646 // checked with an assertion at the entry of this method. 
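  // (Illustrative aside, using a hypothetical class: for a native method such as
  //  com.example.Foo.bar(), the lookup below resolves a JNI-style symbol along the
  //  lines of Java_com_example_Foo_bar in the loaded libraries. That resolution
  //  can call back into Java and load code, which is why it has to happen here,
  //  in the requesting thread, and not on a compiler thread.)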
  if (method->is_native() && !method->is_method_handle_intrinsic()) {
    address adr = NativeLookup::lookup(method, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // In case of an exception looking up the method, we just forget
      // about it. The interpreter will kick in and throw the exception.
      method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable()
      CLEAR_PENDING_EXCEPTION;
      return nullptr;
    }
    assert(method->has_native_function(), "must have native code by now");
  }

  // RedefineClasses() has replaced this method; just return
  if (method->is_old()) {
    return nullptr;
  }

  // JVMTI -- post_compile_event requires jmethod_id() that may require
  // a lock the compiling thread cannot acquire. Prefetch it here.
  if (JvmtiExport::should_post_compiled_method_load()) {
    method->jmethod_id();
  }

  // do the compilation
  if (method->is_native()) {
    if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
      // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
      // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
      //
      // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
      // in this case. If we can't generate one and use it we cannot execute the out-of-line method handle calls.
      AdapterHandlerLibrary::create_native_wrapper(method);
    } else {
      return nullptr;
    }
  } else {
    // If the compiler is shut off because the code cache is getting full,
    // fail out now so blocking compiles don't hang the Java thread.
    if (!should_compile_new_jobs()) {
      return nullptr;
    }
    bool is_blocking = ReplayCompiles ||
                       !directive->BackgroundCompilationOption ||
                       (PreloadBlocking && (compile_reason == CompileTask::Reason_Preload));
    compile_method_base(method, osr_bci, comp_level, hot_count, compile_reason, requires_online_compilation, is_blocking, THREAD);
  }

  // return requested nmethod
  // We accept a higher level osr method
  if (osr_bci == InvocationEntryBci) {
    return method->code();
  }
  return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}


// ------------------------------------------------------------------
// CompileBroker::compilation_is_complete
//
// See if compilation of this method is already complete.
bool CompileBroker::compilation_is_complete(Method* method,
                                            int osr_bci,
                                            int comp_level,
                                            bool online_only,
                                            CompileTask::CompileReason compile_reason) {
  if (compile_reason == CompileTask::Reason_Precompile ||
      compile_reason == CompileTask::Reason_PrecompileForPreload) {
    return false; // FIXME: any restrictions?
1715 } 1716 bool is_osr = (osr_bci != standard_entry_bci); 1717 if (is_osr) { 1718 if (method->is_not_osr_compilable(comp_level)) { 1719 return true; 1720 } else { 1721 nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true); 1722 return (result != nullptr); 1723 } 1724 } else { 1725 if (method->is_not_compilable(comp_level)) { 1726 return true; 1727 } else { 1728 nmethod* result = method->code(); 1729 if (result == nullptr) { 1730 return false; 1731 } 1732 if (online_only && result->is_aot()) { 1733 return false; 1734 } 1735 bool same_level = (comp_level == result->comp_level()); 1736 if (result->has_clinit_barriers()) { 1737 return !same_level; // Allow replace preloaded code with new code of the same level 1738 } 1739 return same_level; 1740 } 1741 } 1742 } 1743 1744 1745 /** 1746 * See if this compilation is already requested. 1747 * 1748 * Implementation note: there is only a single "is in queue" bit 1749 * for each method. This means that the check below is overly 1750 * conservative in the sense that an osr compilation in the queue 1751 * will block a normal compilation from entering the queue (and vice 1752 * versa). This can be remedied by a full queue search to disambiguate 1753 * cases. If it is deemed profitable, this may be done. 1754 */ 1755 bool CompileBroker::compilation_is_in_queue(const methodHandle& method) { 1756 return method->queued_for_compilation(); 1757 } 1758 1759 // ------------------------------------------------------------------ 1760 // CompileBroker::compilation_is_prohibited 1761 // 1762 // See if this compilation is not allowed. 1763 bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) { 1764 bool is_native = method->is_native(); 1765 // Some compilers may not support the compilation of natives. 1766 AbstractCompiler *comp = compiler(comp_level); 1767 if (is_native && (!CICompileNatives || comp == nullptr)) { 1768 method->set_not_compilable_quietly("native methods not supported", comp_level); 1769 return true; 1770 } 1771 1772 bool is_osr = (osr_bci != standard_entry_bci); 1773 // Some compilers may not support on stack replacement. 1774 if (is_osr && (!CICompileOSR || comp == nullptr)) { 1775 method->set_not_osr_compilable("OSR not supported", comp_level); 1776 return true; 1777 } 1778 1779 // The method may be explicitly excluded by the user. 1780 double scale; 1781 if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) { 1782 bool quietly = CompilerOracle::be_quiet(); 1783 if (PrintCompilation && !quietly) { 1784 // This does not happen quietly... 1785 ResourceMark rm; 1786 tty->print("### Excluding %s:%s", 1787 method->is_native() ? "generation of native wrapper" : "compile", 1788 (method->is_static() ? " static" : "")); 1789 method->print_short_name(tty); 1790 tty->cr(); 1791 } 1792 method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly); 1793 } 1794 1795 return false; 1796 } 1797 1798 /** 1799 * Generate serialized IDs for compilation requests. If certain debugging flags are used 1800 * and the ID is not within the specified range, the method is not compiled and 0 is returned. 1801 * The function also allows to generate separate compilation IDs for OSR compilations. 
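 * For example, in a debug build, -XX:CIStart=100 -XX:CIStop=200 lets only
 * requests whose assigned IDs fall in [100, 200) proceed; with -XX:+CICountOSR,
 * OSR requests draw from their own ID sequence and are filtered against
 * CIStartOSR/CIStopOSR instead.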
1802 */ 1803 int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) { 1804 #ifdef ASSERT 1805 bool is_osr = (osr_bci != standard_entry_bci); 1806 int id; 1807 if (method->is_native()) { 1808 assert(!is_osr, "can't be osr"); 1809 // Adapters, native wrappers and method handle intrinsics 1810 // should be generated always. 1811 return AtomicAccess::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1); 1812 } else if (CICountOSR && is_osr) { 1813 id = AtomicAccess::add(&_osr_compilation_id, 1); 1814 if (CIStartOSR <= id && id < CIStopOSR) { 1815 return id; 1816 } 1817 } else { 1818 id = AtomicAccess::add(&_compilation_id, 1); 1819 if (CIStart <= id && id < CIStop) { 1820 return id; 1821 } 1822 } 1823 1824 // Method was not in the appropriate compilation range. 1825 method->set_not_compilable_quietly("Not in requested compile id range"); 1826 return 0; 1827 #else 1828 // CICountOSR is a develop flag and set to 'false' by default. In a product built, 1829 // only _compilation_id is incremented. 1830 return AtomicAccess::add(&_compilation_id, 1); 1831 #endif 1832 } 1833 1834 // ------------------------------------------------------------------ 1835 // CompileBroker::assign_compile_id_unlocked 1836 // 1837 // Public wrapper for assign_compile_id that acquires the needed locks 1838 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) { 1839 return assign_compile_id(method, osr_bci); 1840 } 1841 1842 // ------------------------------------------------------------------ 1843 // CompileBroker::create_compile_task 1844 // 1845 // Create a CompileTask object representing the current request for 1846 // compilation. Add this task to the queue. 1847 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue, 1848 int compile_id, 1849 const methodHandle& method, 1850 int osr_bci, 1851 int comp_level, 1852 int hot_count, 1853 AOTCodeEntry* aot_code_entry, 1854 CompileTask::CompileReason compile_reason, 1855 bool requires_online_compilation, 1856 bool blocking) { 1857 CompileTask* new_task = new CompileTask(compile_id, method, osr_bci, comp_level, 1858 hot_count, aot_code_entry, compile_reason, queue, 1859 requires_online_compilation, blocking); 1860 return new_task; 1861 } 1862 1863 #if INCLUDE_JVMCI 1864 // The number of milliseconds to wait before checking if 1865 // JVMCI compilation has made progress. 1866 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000; 1867 1868 // The number of JVMCI compilation progress checks that must fail 1869 // before unblocking a thread waiting for a blocking compilation. 1870 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10; 1871 1872 /** 1873 * Waits for a JVMCI compiler to complete a given task. This thread 1874 * waits until either the task completes or it sees no JVMCI compilation 1875 * progress for N consecutive milliseconds where N is 1876 * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * 1877 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS. 
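 * (With the default constants above, that is 1000 ms x 10 = 10 seconds without
 * any observed compilation progress.)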
1878 * 1879 * @return true if this thread needs to delete the task 1880 */ 1881 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) { 1882 assert(UseJVMCICompiler, "sanity"); 1883 MonitorLocker ml(thread, CompileTaskWait_lock); 1884 int progress_wait_attempts = 0; 1885 jint thread_jvmci_compilation_ticks = 0; 1886 jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks(); 1887 while (!task->is_complete() && !is_compilation_disabled_forever() && 1888 ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) { 1889 JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state(); 1890 1891 bool progress; 1892 if (jvmci_compile_state != nullptr) { 1893 jint ticks = jvmci_compile_state->compilation_ticks(); 1894 progress = (ticks - thread_jvmci_compilation_ticks) != 0; 1895 JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks); 1896 thread_jvmci_compilation_ticks = ticks; 1897 } else { 1898 // Still waiting on JVMCI compiler queue. This thread may be holding a lock 1899 // that all JVMCI compiler threads are blocked on. We use the global JVMCI 1900 // compilation ticks to determine whether JVMCI compilation 1901 // is still making progress through the JVMCI compiler queue. 1902 jint ticks = jvmci->global_compilation_ticks(); 1903 progress = (ticks - global_jvmci_compilation_ticks) != 0; 1904 JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks); 1905 global_jvmci_compilation_ticks = ticks; 1906 } 1907 1908 if (!progress) { 1909 if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) { 1910 if (PrintCompilation) { 1911 task->print(tty, "wait for blocking compilation timed out"); 1912 } 1913 JVMCI_event_1("waiting on compilation %d timed out", task->compile_id()); 1914 break; 1915 } 1916 } else { 1917 progress_wait_attempts = 0; 1918 } 1919 } 1920 task->clear_waiter(); 1921 return task->is_complete(); 1922 } 1923 #endif 1924 1925 /** 1926 * Wait for the compilation task to complete. 1927 */ 1928 void CompileBroker::wait_for_completion(CompileTask* task) { 1929 if (CIPrintCompileQueue) { 1930 ttyLocker ttyl; 1931 tty->print_cr("BLOCKING FOR COMPILE"); 1932 } 1933 1934 assert(task->is_blocking(), "can only wait on blocking task"); 1935 1936 JavaThread* thread = JavaThread::current(); 1937 1938 methodHandle method(thread, task->method()); 1939 bool free_task; 1940 #if INCLUDE_JVMCI 1941 AbstractCompiler* comp = compiler(task->comp_level()); 1942 if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) { 1943 // It may return before compilation is completed. 1944 // Note that libjvmci should not pre-emptively unblock 1945 // a thread waiting for a compilation as it does not call 1946 // Java code and so is not deadlock prone like jarjvmci. 1947 free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread); 1948 } else 1949 #endif 1950 { 1951 free_task = true; 1952 // Wait until the task is complete or compilation is shut down. 1953 MonitorLocker ml(thread, CompileTaskWait_lock); 1954 while (!task->is_complete() && !is_compilation_disabled_forever()) { 1955 ml.wait(); 1956 } 1957 } 1958 1959 // It is harmless to check this status without the lock, because 1960 // completion is a stable property. 1961 if (!task->is_complete()) { 1962 // Task is not complete, likely because we are exiting for compilation 1963 // shutdown. 
The task can still be reached through the queue, or executed 1964 // by some compiler thread. There is no coordination with either MCQ lock 1965 // holders or compilers, therefore we cannot delete the task. 1966 // 1967 // This will leave task allocated, which leaks it. At this (degraded) point, 1968 // it is less risky to abandon the task, rather than attempting a more 1969 // complicated deletion protocol. 1970 free_task = false; 1971 } 1972 1973 if (free_task) { 1974 assert(task->is_complete(), "Compilation should have completed"); 1975 assert(task->next() == nullptr && task->prev() == nullptr, 1976 "Completed task should not be in the queue"); 1977 1978 // By convention, the waiter is responsible for deleting a 1979 // blocking CompileTask. Since there is only one waiter ever 1980 // waiting on a CompileTask, we know that no one else will 1981 // be using this CompileTask; we can delete it. 1982 delete task; 1983 } 1984 } 1985 1986 void CompileBroker::wait_for_no_active_tasks() { 1987 CompileTask::wait_for_no_active_tasks(); 1988 } 1989 1990 /** 1991 * Initialize compiler thread(s) + compiler object(s). The postcondition 1992 * of this function is that the compiler runtimes are initialized and that 1993 * compiler threads can start compiling. 1994 */ 1995 bool CompileBroker::init_compiler_runtime() { 1996 CompilerThread* thread = CompilerThread::current(); 1997 AbstractCompiler* comp = thread->compiler(); 1998 // Final sanity check - the compiler object must exist 1999 guarantee(comp != nullptr, "Compiler object must exist"); 2000 2001 { 2002 // Must switch to native to allocate ci_env 2003 ThreadToNativeFromVM ttn(thread); 2004 ciEnv ci_env((CompileTask*)nullptr); 2005 // Cache Jvmti state 2006 ci_env.cache_jvmti_state(); 2007 // Cache DTrace flags 2008 ci_env.cache_dtrace_flags(); 2009 2010 // Switch back to VM state to do compiler initialization 2011 ThreadInVMfromNative tv(thread); 2012 2013 comp->initialize(); 2014 } 2015 2016 if (comp->is_failed()) { 2017 disable_compilation_forever(); 2018 // If compiler initialization failed, no compiler thread that is specific to a 2019 // particular compiler runtime will ever start to compile methods. 2020 shutdown_compiler_runtime(comp, thread); 2021 return false; 2022 } 2023 2024 // C1 specific check 2025 if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) { 2026 warning("Initialization of %s thread failed (no space to run compilers)", thread->name()); 2027 return false; 2028 } 2029 2030 return true; 2031 } 2032 2033 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { 2034 BufferBlob* blob = thread->get_buffer_blob(); 2035 if (blob != nullptr) { 2036 blob->purge(); 2037 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2038 CodeCache::free(blob); 2039 } 2040 } 2041 2042 /** 2043 * If C1 and/or C2 initialization failed, we shut down all compilation. 2044 * We do this to keep things simple. This can be changed if it ever turns 2045 * out to be a problem. 2046 */ 2047 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { 2048 free_buffer_blob_if_allocated(thread); 2049 2050 log_info(compilation)("shutdown_compiler_runtime: " INTPTR_FORMAT, p2i(thread)); 2051 2052 if (comp->should_perform_shutdown()) { 2053 // There are two reasons for shutting down the compiler 2054 // 1) compiler runtime initialization failed 2055 // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing 2056 warning("%s initialization failed. 
Shutting down all compilers", comp->name()); 2057 2058 // Only one thread per compiler runtime object enters here 2059 // Set state to shut down 2060 comp->set_shut_down(); 2061 2062 // Delete all queued compilation tasks to make compiler threads exit faster. 2063 if (_c1_compile_queue != nullptr) { 2064 _c1_compile_queue->delete_all(); 2065 } 2066 2067 if (_c2_compile_queue != nullptr) { 2068 _c2_compile_queue->delete_all(); 2069 } 2070 2071 // Set flags so that we continue execution with using interpreter only. 2072 UseCompiler = false; 2073 UseInterpreter = true; 2074 2075 // We could delete compiler runtimes also. However, there are references to 2076 // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then 2077 // fail. This can be done later if necessary. 2078 } 2079 } 2080 2081 /** 2082 * Helper function to create new or reuse old CompileLog. 2083 */ 2084 CompileLog* CompileBroker::get_log(CompilerThread* ct) { 2085 if (!LogCompilation) return nullptr; 2086 2087 AbstractCompiler *compiler = ct->compiler(); 2088 bool c1 = compiler->is_c1(); 2089 jobject* compiler_objects = c1 ? _compiler1_objects : _compiler2_objects; 2090 assert(compiler_objects != nullptr, "must be initialized at this point"); 2091 CompileLog** logs = c1 ? _compiler1_logs : _compiler2_logs; 2092 assert(logs != nullptr, "must be initialized at this point"); 2093 int count = c1 ? _c1_count : _c2_count; 2094 2095 if (ct->queue() == _ac1_compile_queue || ct->queue() == _ac2_compile_queue) { 2096 compiler_objects = _ac_objects; 2097 logs = _ac_logs; 2098 count = _ac_count; 2099 } 2100 // Find Compiler number by its threadObj. 2101 oop compiler_obj = ct->threadObj(); 2102 int compiler_number = 0; 2103 bool found = false; 2104 for (; compiler_number < count; compiler_number++) { 2105 if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) { 2106 found = true; 2107 break; 2108 } 2109 } 2110 assert(found, "Compiler must exist at this point"); 2111 2112 // Determine pointer for this thread's log. 2113 CompileLog** log_ptr = &logs[compiler_number]; 2114 2115 // Return old one if it exists. 2116 CompileLog* log = *log_ptr; 2117 if (log != nullptr) { 2118 ct->init_log(log); 2119 return log; 2120 } 2121 2122 // Create a new one and remember it. 2123 init_compiler_thread_log(); 2124 log = ct->log(); 2125 *log_ptr = log; 2126 return log; 2127 } 2128 2129 // ------------------------------------------------------------------ 2130 // CompileBroker::compiler_thread_loop 2131 // 2132 // The main loop run by a CompilerThread. 2133 void CompileBroker::compiler_thread_loop() { 2134 CompilerThread* thread = CompilerThread::current(); 2135 CompileQueue* queue = thread->queue(); 2136 // For the thread that initializes the ciObjectFactory 2137 // this resource mark holds all the shared objects 2138 ResourceMark rm; 2139 2140 // First thread to get here will initialize the compiler interface 2141 2142 { 2143 ASSERT_IN_VM; 2144 MutexLocker only_one (thread, CompileThread_lock); 2145 if (!ciObjectFactory::is_initialized()) { 2146 ciObjectFactory::initialize(); 2147 } 2148 } 2149 2150 // Open a log. 
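  // (Illustrative, assuming -XX:+LogCompilation is enabled so that get_log()
  //  returns a log: the element emitted below renders roughly as
  //    <start_compile_thread name='C2 CompilerThread0' thread='12345' process='6789' stamp='0.123'/>
  //  in this compiler thread's log file.)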
  CompileLog* log = get_log(thread);
  if (log != nullptr) {
    log->begin_elem("start_compile_thread name='%s' thread='%zu' process='%d'",
                    thread->name(),
                    os::current_thread_id(),
                    os::current_process_id());
    log->stamp();
    log->end_elem();
  }

  if (!thread->init_compilation_timeout()) {
    return;
  }

  // If compiler thread/runtime initialization fails, exit the compiler thread
  if (!init_compiler_runtime()) {
    return;
  }

  thread->start_idle_timer();

  // Poll for new compilation tasks as long as the JVM runs. Compilation
  // should only be disabled if something went wrong while initializing the
  // compiler runtimes. This, in turn, should not happen. The only known case
  // when compiler runtime initialization fails is if there is not enough free
  // space in the code cache to generate the necessary stubs, etc.
  while (!is_compilation_disabled_forever()) {
    // We need this HandleMark to avoid leaking VM handles.
    HandleMark hm(thread);

    RecompilationPolicy::recompilation_step(AOTRecompilationWorkUnitSize, thread);

    CompileTask* task = queue->get(thread);
    if (task == nullptr) {
      if (UseDynamicNumberOfCompilerThreads) {
        // Access compiler_count under lock to enforce consistency.
        MutexLocker only_one(CompileThread_lock);
        if (can_remove(thread, true)) {
          if (trace_compiler_threads()) {
            ResourceMark rm;
            stringStream msg;
            msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time",
                      thread->name(), thread->idle_time_millis());
            print_compiler_threads(msg);
          }

          // Notify compiler that the compiler thread is about to stop
          thread->compiler()->stopping_compiler_thread(thread);

          free_buffer_blob_if_allocated(thread);
          return; // Stop this thread.
        }
      }
    } else {
      // Assign the task to the current thread. Mark this compilation
      // thread as active for the profiler.
      // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition
      // occurs after fetching the compile task off the queue.
      CompileTaskWrapper ctw(task);
      methodHandle method(thread, task->method());

      // Never compile a method if breakpoints are present in it
      if (method()->number_of_breakpoints() == 0) {
        // Compile the method.
        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
          invoke_compiler_on_method(task);
          thread->start_idle_timer();
        } else {
          // After compilation is disabled, remove remaining methods from queue
          method->clear_queued_for_compilation();
          method->set_pending_queue_processed(false);
          task->set_failure_reason("compilation is disabled");
        }
      } else {
        task->set_failure_reason("breakpoints are present");
      }

      // Don't use AOT compiler threads for dynamic C1 and C2 thread creation.
      if (UseDynamicNumberOfCompilerThreads &&
          (queue == _c1_compile_queue || queue == _c2_compile_queue)) {
        possibly_add_compiler_threads(thread);
        assert(!thread->has_pending_exception(), "should have been handled");
      }
    }
  }

  // Shut down compiler runtime
  shutdown_compiler_runtime(thread->compiler(), thread);
}

// ------------------------------------------------------------------
// CompileBroker::init_compiler_thread_log
//
// Set up state required by +LogCompilation.
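// (Illustrative: the per-thread log created below is named hs_c<thread_id>_pid<pid>.log,
//  e.g. hs_c12345_pid6789.log, and lands in the temp directory when one is available,
//  otherwise in the current working directory.)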
2245 void CompileBroker::init_compiler_thread_log() { 2246 CompilerThread* thread = CompilerThread::current(); 2247 char file_name[4*K]; 2248 FILE* fp = nullptr; 2249 intx thread_id = os::current_thread_id(); 2250 for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) { 2251 const char* dir = (try_temp_dir ? os::get_temp_directory() : nullptr); 2252 if (dir == nullptr) { 2253 jio_snprintf(file_name, sizeof(file_name), "hs_c%zu_pid%u.log", 2254 thread_id, os::current_process_id()); 2255 } else { 2256 jio_snprintf(file_name, sizeof(file_name), 2257 "%s%shs_c%zu_pid%u.log", dir, 2258 os::file_separator(), thread_id, os::current_process_id()); 2259 } 2260 2261 fp = os::fopen(file_name, "wt"); 2262 if (fp != nullptr) { 2263 if (LogCompilation && Verbose) { 2264 tty->print_cr("Opening compilation log %s", file_name); 2265 } 2266 CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id); 2267 if (log == nullptr) { 2268 fclose(fp); 2269 return; 2270 } 2271 thread->init_log(log); 2272 2273 if (xtty != nullptr) { 2274 ttyLocker ttyl; 2275 // Record any per thread log files 2276 xtty->elem("thread_logfile thread='%zd' filename='%s'", thread_id, file_name); 2277 } 2278 return; 2279 } 2280 } 2281 warning("Cannot open log file: %s", file_name); 2282 } 2283 2284 void CompileBroker::log_metaspace_failure() { 2285 const char* message = "some methods may not be compiled because metaspace " 2286 "is out of memory"; 2287 if (CompilationLog::log() != nullptr) { 2288 CompilationLog::log()->log_metaspace_failure(message); 2289 } 2290 if (PrintCompilation) { 2291 tty->print_cr("COMPILE PROFILING SKIPPED: %s", message); 2292 } 2293 } 2294 2295 2296 // ------------------------------------------------------------------ 2297 // CompileBroker::set_should_block 2298 // 2299 // Set _should_block. 2300 // Call this from the VM, with Threads_lock held and a safepoint requested. 2301 void CompileBroker::set_should_block() { 2302 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); 2303 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already"); 2304 #ifndef PRODUCT 2305 if (PrintCompilation && (Verbose || WizardMode)) 2306 tty->print_cr("notifying compiler thread pool to block"); 2307 #endif 2308 _should_block = true; 2309 } 2310 2311 // ------------------------------------------------------------------ 2312 // CompileBroker::maybe_block 2313 // 2314 // Call this from the compiler at convenient points, to poll for _should_block. 2315 void CompileBroker::maybe_block() { 2316 if (_should_block) { 2317 #ifndef PRODUCT 2318 if (PrintCompilation && (Verbose || WizardMode)) 2319 tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current())); 2320 #endif 2321 // If we are executing a task during the request to block, report the task 2322 // before disappearing. 2323 CompilerThread* thread = CompilerThread::current(); 2324 if (thread != nullptr) { 2325 CompileTask* task = thread->task(); 2326 if (task != nullptr) { 2327 if (PrintCompilation) { 2328 task->print(tty, "blocked"); 2329 } 2330 task->print_ul("blocked"); 2331 } 2332 } 2333 // Go to VM state and block for final VM shutdown safepoint. 
2334 ThreadInVMfromNative tivfn(JavaThread::current()); 2335 assert(false, "Should never unblock from TIVNM entry"); 2336 } 2337 } 2338 2339 // wrapper for CodeCache::print_summary() 2340 static void codecache_print(bool detailed) 2341 { 2342 stringStream s; 2343 // Dump code cache into a buffer before locking the tty, 2344 { 2345 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2346 CodeCache::print_summary(&s, detailed); 2347 } 2348 ttyLocker ttyl; 2349 tty->print("%s", s.freeze()); 2350 } 2351 2352 // wrapper for CodeCache::print_summary() using outputStream 2353 static void codecache_print(outputStream* out, bool detailed) { 2354 stringStream s; 2355 2356 // Dump code cache into a buffer 2357 { 2358 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2359 CodeCache::print_summary(&s, detailed); 2360 } 2361 2362 char* remaining_log = s.as_string(); 2363 while (*remaining_log != '\0') { 2364 char* eol = strchr(remaining_log, '\n'); 2365 if (eol == nullptr) { 2366 out->print_cr("%s", remaining_log); 2367 remaining_log = remaining_log + strlen(remaining_log); 2368 } else { 2369 *eol = '\0'; 2370 out->print_cr("%s", remaining_log); 2371 remaining_log = eol + 1; 2372 } 2373 } 2374 } 2375 2376 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, 2377 int compilable, const char* failure_reason) { 2378 if (!AbortVMOnCompilationFailure) { 2379 return; 2380 } 2381 if (compilable == ciEnv::MethodCompilable_not_at_tier) { 2382 fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); 2383 } 2384 if (compilable == ciEnv::MethodCompilable_never) { 2385 fatal("Never compilable: %s", failure_reason); 2386 } 2387 } 2388 2389 static void post_compilation_event(EventCompilation& event, CompileTask* task) { 2390 assert(task != nullptr, "invariant"); 2391 CompilerEvent::CompilationEvent::post(event, 2392 task->compile_id(), 2393 task->compiler()->type(), 2394 task->method(), 2395 task->comp_level(), 2396 task->is_success(), 2397 task->osr_bci() != CompileBroker::standard_entry_bci, 2398 task->nm_total_size(), 2399 task->num_inlined_bytecodes(), 2400 task->arena_bytes()); 2401 } 2402 2403 int DirectivesStack::_depth = 0; 2404 CompilerDirectives* DirectivesStack::_top = nullptr; 2405 CompilerDirectives* DirectivesStack::_bottom = nullptr; 2406 2407 // Acquires Compilation_lock and waits for it to be notified 2408 // as long as WhiteBox::compilation_locked is true. 2409 static void whitebox_lock_compilation() { 2410 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag); 2411 while (WhiteBox::compilation_locked) { 2412 locker.wait(); 2413 } 2414 } 2415 2416 // ------------------------------------------------------------------ 2417 // CompileBroker::invoke_compiler_on_method 2418 // 2419 // Compile a method. 2420 // 2421 void CompileBroker::invoke_compiler_on_method(CompileTask* task) { 2422 task->print_ul(); 2423 elapsedTimer time; 2424 2425 DirectiveSet* directive = task->directive(); 2426 2427 CompilerThread* thread = CompilerThread::current(); 2428 ResourceMark rm(thread); 2429 2430 if (CompilationLog::log() != nullptr) { 2431 CompilationLog::log()->log_compile(thread, task); 2432 } 2433 2434 // Common flags. 
  int compile_id = task->compile_id();
  int osr_bci = task->osr_bci();
  bool is_osr = (osr_bci != standard_entry_bci);
  bool should_log = (thread->log() != nullptr);
  bool should_break = false;
  bool should_print_compilation = PrintCompilation || directive->PrintCompilationOption;
  const int task_level = task->comp_level();
  AbstractCompiler* comp = task->compiler();
  {
    // create the handle inside its own block so it can't
    // accidentally be referenced once the thread transitions to
    // native. The NoHandleMark before the transition should catch
    // any cases where this occurs in the future.
    methodHandle method(thread, task->method());

    assert(!method->is_native(), "no longer compile natives");

    // Update compile information when using perfdata.
    if (UsePerfData) {
      update_compile_perf_data(thread, method, is_osr);
    }

    DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
  }

  should_break = directive->BreakAtCompileOption || task->check_break_at_flags();
  if (should_log && !directive->LogOption) {
    should_log = false;
  }

  // Allocate a new set of JNI handles.
  JNIHandleMark jhm(thread);
  Method* target_handle = task->method();
  int compilable = ciEnv::MethodCompilable;
  const char* failure_reason = nullptr;
  bool failure_reason_on_C_heap = false;
  const char* retry_message = nullptr;

#if INCLUDE_JVMCI
  if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) {
    JVMCICompiler* jvmci = (JVMCICompiler*) comp;

    TraceTime t1("compilation", &time);
    EventCompilation event;
    JVMCICompileState compile_state(task, jvmci);
    JVMCIRuntime *runtime = nullptr;

    if (JVMCI::in_shutdown()) {
      failure_reason = "in JVMCI shutdown";
      retry_message = "not retryable";
      compilable = ciEnv::MethodCompilable_never;
    } else if (compile_state.target_method_is_old()) {
      // Skip redefined methods
      failure_reason = "redefined method";
      retry_message = "not retryable";
      compilable = ciEnv::MethodCompilable_never;
    } else {
      JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__);
      if (env.init_error() != JNI_OK) {
        const char* msg = env.init_error_msg();
        failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)",
                                            env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI);
        bool reason_on_C_heap = true;
        // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it
        // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime.
2500 bool retryable = env.init_error() == JNI_ENOMEM; 2501 compile_state.set_failure(retryable, failure_reason, reason_on_C_heap); 2502 } 2503 if (failure_reason == nullptr) { 2504 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2505 // Must switch to native to block 2506 ThreadToNativeFromVM ttn(thread); 2507 whitebox_lock_compilation(); 2508 } 2509 methodHandle method(thread, target_handle); 2510 runtime = env.runtime(); 2511 runtime->compile_method(&env, jvmci, method, osr_bci); 2512 2513 failure_reason = compile_state.failure_reason(); 2514 failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); 2515 if (!compile_state.retryable()) { 2516 retry_message = "not retryable"; 2517 compilable = ciEnv::MethodCompilable_not_at_tier; 2518 } 2519 if (!task->is_success()) { 2520 assert(failure_reason != nullptr, "must specify failure_reason"); 2521 } 2522 } 2523 } 2524 if (!task->is_success() && !JVMCI::in_shutdown()) { 2525 handle_compile_error(thread, task, nullptr, compilable, failure_reason); 2526 } 2527 if (event.should_commit()) { 2528 post_compilation_event(event, task); 2529 } 2530 2531 if (runtime != nullptr) { 2532 runtime->post_compile(thread); 2533 } 2534 } else 2535 #endif // INCLUDE_JVMCI 2536 { 2537 NoHandleMark nhm; 2538 ThreadToNativeFromVM ttn(thread); 2539 2540 ciEnv ci_env(task); 2541 if (should_break) { 2542 ci_env.set_break_at_compile(true); 2543 } 2544 if (should_log) { 2545 ci_env.set_log(thread->log()); 2546 } 2547 assert(thread->env() == &ci_env, "set by ci_env"); 2548 // The thread-env() field is cleared in ~CompileTaskWrapper. 2549 2550 // Cache Jvmti state 2551 bool method_is_old = ci_env.cache_jvmti_state(); 2552 2553 // Skip redefined methods 2554 if (method_is_old) { 2555 ci_env.record_method_not_compilable("redefined method", true); 2556 } 2557 2558 // Cache DTrace flags 2559 ci_env.cache_dtrace_flags(); 2560 2561 ciMethod* target = ci_env.get_method_from_handle(target_handle); 2562 2563 TraceTime t1("compilation", &time); 2564 EventCompilation event; 2565 2566 if (comp == nullptr) { 2567 ci_env.record_method_not_compilable("no compiler"); 2568 } else if (!ci_env.failing()) { 2569 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2570 whitebox_lock_compilation(); 2571 } 2572 comp->compile_method(&ci_env, target, osr_bci, true, directive); 2573 2574 /* Repeat compilation without installing code for profiling purposes */ 2575 int repeat_compilation_count = directive->RepeatCompilationOption; 2576 while (repeat_compilation_count > 0) { 2577 ResourceMark rm(thread); 2578 task->print_ul("NO CODE INSTALLED"); 2579 thread->timeout()->reset(); 2580 comp->compile_method(&ci_env, target, osr_bci, false, directive); 2581 repeat_compilation_count--; 2582 } 2583 } 2584 2585 2586 if (!ci_env.failing() && !task->is_success() && !task->is_precompile()) { 2587 assert(ci_env.failure_reason() != nullptr, "expect failure reason"); 2588 assert(false, "compiler should always document failure: %s", ci_env.failure_reason()); 2589 // The compiler elected, without comment, not to register a result. 2590 // Do not attempt further compilations of this method. 
2591 ci_env.record_method_not_compilable("compile failed"); 2592 } 2593 2594 // Copy this bit to the enclosing block: 2595 compilable = ci_env.compilable(); 2596 2597 if (ci_env.failing()) { 2598 // Duplicate the failure reason string, so that it outlives ciEnv 2599 failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler); 2600 failure_reason_on_C_heap = true; 2601 retry_message = ci_env.retry_message(); 2602 ci_env.report_failure(failure_reason); 2603 } 2604 2605 if (ci_env.failing()) { 2606 handle_compile_error(thread, task, &ci_env, compilable, failure_reason); 2607 } 2608 if (event.should_commit()) { 2609 post_compilation_event(event, task); 2610 } 2611 } 2612 2613 if (failure_reason != nullptr) { 2614 task->set_failure_reason(failure_reason, failure_reason_on_C_heap); 2615 if (CompilationLog::log() != nullptr) { 2616 CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message); 2617 } 2618 if (PrintCompilation || directive->PrintCompilationOption) { 2619 FormatBufferResource msg = retry_message != nullptr ? 2620 FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) : 2621 FormatBufferResource("COMPILE SKIPPED: %s", failure_reason); 2622 task->print(tty, msg); 2623 } 2624 } 2625 2626 task->mark_finished(os::elapsed_counter()); 2627 DirectivesStack::release(directive); 2628 2629 methodHandle method(thread, task->method()); 2630 2631 DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); 2632 2633 collect_statistics(thread, time, task); 2634 2635 if (PrintCompilation && PrintCompilation2) { 2636 tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp 2637 tty->print("%4d ", compile_id); // print compilation number 2638 tty->print("%s ", (is_osr ? "%" : (task->is_aot_load() ? (task->preload() ? "P" : "A") : " "))); 2639 if (task->is_success()) { 2640 tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size()); 2641 } 2642 tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); 2643 } 2644 2645 Log(compilation, codecache) log; 2646 if (log.is_debug()) { 2647 LogStream ls(log.debug()); 2648 codecache_print(&ls, /* detailed= */ false); 2649 } 2650 if (PrintCodeCacheOnCompilation) { 2651 codecache_print(/* detailed= */ false); 2652 } 2653 // Disable compilation, if required. 2654 switch (compilable) { 2655 case ciEnv::MethodCompilable_never: 2656 if (is_osr) 2657 method->set_not_osr_compilable_quietly("MethodCompilable_never"); 2658 else 2659 method->set_not_compilable_quietly("MethodCompilable_never"); 2660 break; 2661 case ciEnv::MethodCompilable_not_at_tier: 2662 if (is_osr) 2663 method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2664 else 2665 method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2666 break; 2667 } 2668 2669 // Note that the queued_for_compilation bits are cleared without 2670 // protection of a mutex. [They were set by the requester thread, 2671 // when adding the task to the compile queue -- at which time the 2672 // compile queue lock was held. Subsequently, we acquired the compile 2673 // queue lock to get this task off the compile queue; thus (to belabour 2674 // the point somewhat) our clearing of the bits must be occurring 2675 // only after the setting of the bits. See also 14012000 above. 
2676 method->clear_queued_for_compilation(); 2677 method->set_pending_queue_processed(false); 2678 2679 if (should_print_compilation) { 2680 ResourceMark rm; 2681 task->print_tty(); 2682 } 2683 } 2684 2685 /** 2686 * The CodeCache is full. Print warning and disable compilation. 2687 * Schedule code cache cleaning so compilation can continue later. 2688 * This function needs to be called only from CodeCache::allocate(), 2689 * since we currently handle a full code cache uniformly. 2690 */ 2691 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) { 2692 UseInterpreter = true; 2693 if (UseCompiler || AlwaysCompileLoopMethods ) { 2694 if (xtty != nullptr) { 2695 stringStream s; 2696 // Dump code cache state into a buffer before locking the tty, 2697 // because log_state() will use locks causing lock conflicts. 2698 CodeCache::log_state(&s); 2699 // Lock to prevent tearing 2700 ttyLocker ttyl; 2701 xtty->begin_elem("code_cache_full"); 2702 xtty->print("%s", s.freeze()); 2703 xtty->stamp(); 2704 xtty->end_elem(); 2705 } 2706 2707 #ifndef PRODUCT 2708 if (ExitOnFullCodeCache) { 2709 codecache_print(/* detailed= */ true); 2710 before_exit(JavaThread::current()); 2711 exit_globals(); // will delete tty 2712 vm_direct_exit(1); 2713 } 2714 #endif 2715 if (UseCodeCacheFlushing) { 2716 // Since code cache is full, immediately stop new compiles 2717 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { 2718 log_info(codecache)("Code cache is full - disabling compilation"); 2719 } 2720 } else { 2721 disable_compilation_forever(); 2722 } 2723 2724 CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning()); 2725 } 2726 } 2727 2728 // ------------------------------------------------------------------ 2729 // CompileBroker::update_compile_perf_data 2730 // 2731 // Record this compilation for debugging purposes. 2732 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) { 2733 ResourceMark rm; 2734 char* method_name = method->name()->as_C_string(); 2735 char current_method[CompilerCounters::cmname_buffer_length]; 2736 size_t maxLen = CompilerCounters::cmname_buffer_length; 2737 2738 const char* class_name = method->method_holder()->name()->as_C_string(); 2739 2740 size_t s1len = strlen(class_name); 2741 size_t s2len = strlen(method_name); 2742 2743 // check if we need to truncate the string 2744 if (s1len + s2len + 2 > maxLen) { 2745 2746 // the strategy is to lop off the leading characters of the 2747 // class name and the trailing characters of the method name. 2748 2749 if (s2len + 2 > maxLen) { 2750 // lop of the entire class name string, let snprintf handle 2751 // truncation of the method name. 
2752 class_name += s1len; // null string 2753 } 2754 else { 2755 // lop off the extra characters from the front of the class name 2756 class_name += ((s1len + s2len + 2) - maxLen); 2757 } 2758 } 2759 2760 jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name); 2761 2762 int last_compile_type = normal_compile; 2763 if (CICountOSR && is_osr) { 2764 last_compile_type = osr_compile; 2765 } else if (CICountNative && method->is_native()) { 2766 last_compile_type = native_compile; 2767 } 2768 2769 CompilerCounters* counters = thread->counters(); 2770 counters->set_current_method(current_method); 2771 counters->set_compile_type((jlong) last_compile_type); 2772 } 2773 2774 // ------------------------------------------------------------------ 2775 // CompileBroker::collect_statistics 2776 // 2777 // Collect statistics about the compilation. 2778 2779 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) { 2780 bool success = task->is_success(); 2781 methodHandle method (thread, task->method()); 2782 int compile_id = task->compile_id(); 2783 bool is_osr = (task->osr_bci() != standard_entry_bci); 2784 const int comp_level = task->comp_level(); 2785 CompilerCounters* counters = thread->counters(); 2786 2787 MutexLocker locker(CompileStatistics_lock); 2788 2789 // _perf variables are production performance counters which are 2790 // updated regardless of the setting of the CITime and CITimeEach flags 2791 // 2792 2793 // account all time, including bailouts and failures in this counter; 2794 // C1 and C2 counters are counting both successful and unsuccessful compiles 2795 _t_total_compilation.add(&time); 2796 2797 // Update compilation times. Used by the implementation of JFR CompilerStatistics 2798 // and java.lang.management.CompilationMXBean. 2799 _perf_total_compilation->inc(time.ticks()); 2800 _peak_compilation_time = MAX2(time.milliseconds(), _peak_compilation_time); 2801 2802 if (!success) { 2803 _total_bailout_count++; 2804 if (UsePerfData) { 2805 _perf_last_failed_method->set_value(counters->current_method()); 2806 _perf_last_failed_type->set_value(counters->compile_type()); 2807 _perf_total_bailout_count->inc(); 2808 } 2809 _t_bailedout_compilation.add(&time); 2810 2811 if (CITime || log_is_enabled(Info, init)) { 2812 CompilerStatistics* stats = nullptr; 2813 if (task->is_aot_load()) { 2814 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2815 stats = &_aot_stats_per_level[level]; 2816 } else { 2817 stats = &_stats_per_level[comp_level-1]; 2818 } 2819 stats->_bailout.update(time, 0); 2820 } 2821 } else if (!task->is_success()) { 2822 if (UsePerfData) { 2823 _perf_last_invalidated_method->set_value(counters->current_method()); 2824 _perf_last_invalidated_type->set_value(counters->compile_type()); 2825 _perf_total_invalidated_count->inc(); 2826 } 2827 _total_invalidated_count++; 2828 _t_invalidated_compilation.add(&time); 2829 2830 if (CITime || log_is_enabled(Info, init)) { 2831 CompilerStatistics* stats = nullptr; 2832 if (task->is_aot_load()) { 2833 int level = task->preload() ? 
CompLevel_full_optimization : (comp_level - 1); 2834 stats = &_aot_stats_per_level[level]; 2835 } else { 2836 stats = &_stats_per_level[comp_level-1]; 2837 } 2838 stats->_invalidated.update(time, 0); 2839 } 2840 } else { 2841 // Compilation succeeded 2842 if (CITime || log_is_enabled(Info, init)) { 2843 int bytes_compiled = method->code_size() + task->num_inlined_bytecodes(); 2844 if (is_osr) { 2845 _t_osr_compilation.add(&time); 2846 _sum_osr_bytes_compiled += bytes_compiled; 2847 } else { 2848 _t_standard_compilation.add(&time); 2849 _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); 2850 } 2851 2852 // Collect statistic per compilation level 2853 if (task->is_aot_load()) { 2854 _aot_stats._standard.update(time, bytes_compiled); 2855 _aot_stats._nmethods_size += task->nm_total_size(); 2856 _aot_stats._nmethods_code_size += task->nm_insts_size(); 2857 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2858 CompilerStatistics* stats = &_aot_stats_per_level[level]; 2859 stats->_standard.update(time, bytes_compiled); 2860 stats->_nmethods_size += task->nm_total_size(); 2861 stats->_nmethods_code_size += task->nm_insts_size(); 2862 } else if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) { 2863 CompilerStatistics* stats = &_stats_per_level[comp_level-1]; 2864 if (is_osr) { 2865 stats->_osr.update(time, bytes_compiled); 2866 } else { 2867 stats->_standard.update(time, bytes_compiled); 2868 } 2869 stats->_nmethods_size += task->nm_total_size(); 2870 stats->_nmethods_code_size += task->nm_insts_size(); 2871 } else { 2872 assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); 2873 } 2874 2875 // Collect statistic per compiler 2876 AbstractCompiler* comp = task->compiler(); 2877 if (comp && !task->is_aot_load()) { 2878 CompilerStatistics* stats = comp->stats(); 2879 if (is_osr) { 2880 stats->_osr.update(time, bytes_compiled); 2881 } else { 2882 stats->_standard.update(time, bytes_compiled); 2883 } 2884 stats->_nmethods_size += task->nm_total_size(); 2885 stats->_nmethods_code_size += task->nm_insts_size(); 2886 } else if (!task->is_aot_load()) { // if (!comp) 2887 assert(false, "Compiler object must exist"); 2888 } 2889 } 2890 2891 if (UsePerfData) { 2892 // save the name of the last method compiled 2893 _perf_last_method->set_value(counters->current_method()); 2894 _perf_last_compile_type->set_value(counters->compile_type()); 2895 _perf_last_compile_size->set_value(method->code_size() + 2896 task->num_inlined_bytecodes()); 2897 if (is_osr) { 2898 _perf_osr_compilation->inc(time.ticks()); 2899 _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2900 } else { 2901 _perf_standard_compilation->inc(time.ticks()); 2902 _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2903 } 2904 } 2905 2906 if (CITimeEach) { 2907 double compile_time = time.seconds(); 2908 double bytes_per_sec = compile_time == 0.0 ? 
0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time; 2909 tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)", 2910 compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); 2911 } 2912 2913 // Collect counts of successful compilations 2914 _sum_nmethod_size += task->nm_total_size(); 2915 _sum_nmethod_code_size += task->nm_insts_size(); 2916 _total_compile_count++; 2917 2918 if (UsePerfData) { 2919 _perf_sum_nmethod_size->inc( task->nm_total_size()); 2920 _perf_sum_nmethod_code_size->inc(task->nm_insts_size()); 2921 _perf_total_compile_count->inc(); 2922 } 2923 2924 if (is_osr) { 2925 if (UsePerfData) _perf_total_osr_compile_count->inc(); 2926 _total_osr_compile_count++; 2927 } else { 2928 if (UsePerfData) _perf_total_standard_compile_count->inc(); 2929 _total_standard_compile_count++; 2930 } 2931 } 2932 // set the current method for the thread to null 2933 if (UsePerfData) counters->set_current_method(""); 2934 } 2935 2936 const char* CompileBroker::compiler_name(int comp_level) { 2937 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 2938 if (comp == nullptr) { 2939 return "no compiler"; 2940 } else { 2941 return (comp->name()); 2942 } 2943 } 2944 2945 jlong CompileBroker::total_compilation_ticks() { 2946 return _perf_total_compilation != nullptr ? _perf_total_compilation->get_value() : 0; 2947 } 2948 2949 void CompileBroker::log_not_entrant(nmethod* nm) { 2950 _total_not_entrant_count++; 2951 if (CITime || log_is_enabled(Info, init)) { 2952 CompilerStatistics* stats = nullptr; 2953 int level = nm->comp_level(); 2954 if (nm->is_aot()) { 2955 if (nm->preloaded()) { 2956 assert(level == CompLevel_full_optimization, "%d", level); 2957 level = CompLevel_full_optimization + 1; 2958 } 2959 stats = &_aot_stats_per_level[level - 1]; 2960 } else { 2961 stats = &_stats_per_level[level - 1]; 2962 } 2963 stats->_made_not_entrant._count++; 2964 } 2965 } 2966 2967 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) { 2968 tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}", 2969 name, stats->bytes_per_second(), 2970 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, 2971 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, 2972 stats->_nmethods_size, stats->_nmethods_code_size); 2973 } 2974 2975 static void print_helper(outputStream* st, const char* name, CompilerStatistics::Data data, bool print_time = true) { 2976 if (data._count > 0) { 2977 st->print("; %s: %4u methods", name, data._count); 2978 if (print_time) { 2979 st->print(" (in %.3fs)", data._time.seconds()); 2980 } 2981 } 2982 } 2983 2984 static void print_tier_helper(outputStream* st, const char* prefix, int tier, CompilerStatistics* stats) { 2985 st->print(" %s%d: %5u methods", prefix, tier, stats->_standard._count); 2986 if (stats->_standard._count > 0) { 2987 st->print(" (in %.3fs)", stats->_standard._time.seconds()); 2988 } 2989 print_helper(st, "osr", stats->_osr); 2990 print_helper(st, "bailout", stats->_bailout); 2991 print_helper(st, "invalid", stats->_invalidated); 2992 print_helper(st, "not_entrant", stats->_made_not_entrant, false); 2993 st->cr(); 2994 } 2995 2996 static void print_queue_info(outputStream* st, CompileQueue* queue) { 2997 if (queue != nullptr) { 2998 MutexLocker ml(queue->lock()); 2999 3000 uint total_cnt = 0; 3001 uint 
active_cnt = 0; 3002 for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3003 guarantee(jt != nullptr, ""); 3004 if (jt->is_Compiler_thread()) { 3005 CompilerThread* ct = (CompilerThread*)jt; 3006 3007 guarantee(ct != nullptr, ""); 3008 if (ct->queue() == queue) { 3009 ++total_cnt; 3010 CompileTask* task = ct->task(); 3011 if (task != nullptr) { 3012 ++active_cnt; 3013 } 3014 } 3015 } 3016 } 3017 3018 st->print(" %s (%d active / %d total threads): %u tasks", 3019 queue->name(), active_cnt, total_cnt, queue->size()); 3020 if (queue->size() > 0) { 3021 uint counts[] = {0, 0, 0, 0, 0}; // T1 ... T5 3022 for (CompileTask* task = queue->first(); task != nullptr; task = task->next()) { 3023 int tier = task->comp_level(); 3024 if (task->is_aot_load() && task->preload()) { 3025 assert(tier == CompLevel_full_optimization, "%d", tier); 3026 tier = CompLevel_full_optimization + 1; 3027 } 3028 counts[tier-1]++; 3029 } 3030 st->print(":"); 3031 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3032 uint cnt = counts[tier-1]; 3033 if (cnt > 0) { 3034 st->print(" T%d: %u tasks;", tier, cnt); 3035 } 3036 } 3037 } 3038 st->cr(); 3039 3040 // for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3041 // guarantee(jt != nullptr, ""); 3042 // if (jt->is_Compiler_thread()) { 3043 // CompilerThread* ct = (CompilerThread*)jt; 3044 // 3045 // guarantee(ct != nullptr, ""); 3046 // if (ct->queue() == queue) { 3047 // ResourceMark rm; 3048 // CompileTask* task = ct->task(); 3049 // st->print(" %s: ", ct->name_raw()); 3050 // if (task != nullptr) { 3051 // task->print(st, nullptr, true /*short_form*/, false /*cr*/); 3052 // } 3053 // st->cr(); 3054 // } 3055 // } 3056 // } 3057 } 3058 } 3059 void CompileBroker::print_statistics_on(outputStream* st) { 3060 st->print_cr(" Total: %u methods; %u bailouts, %u invalidated, %u non_entrant", 3061 _total_compile_count, _total_bailout_count, _total_invalidated_count, _total_not_entrant_count); 3062 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 3063 print_tier_helper(st, "Tier", tier, &_stats_per_level[tier-1]); 3064 } 3065 st->cr(); 3066 3067 if (AOTCodeCaching) { 3068 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3069 if (tier != CompLevel_full_profile) { 3070 print_tier_helper(st, "AOT Code T", tier, &_aot_stats_per_level[tier - 1]); 3071 } 3072 } 3073 st->cr(); 3074 } 3075 3076 print_queue_info(st, _c1_compile_queue); 3077 print_queue_info(st, _c2_compile_queue); 3078 print_queue_info(st, _ac1_compile_queue); 3079 print_queue_info(st, _ac2_compile_queue); 3080 } 3081 3082 void CompileBroker::print_times(bool per_compiler, bool aggregate) { 3083 if (per_compiler) { 3084 if (aggregate) { 3085 tty->cr(); 3086 tty->print_cr("[%dms] Individual compiler times (for compiled methods only)", (int)tty->time_stamp().milliseconds()); 3087 tty->print_cr("------------------------------------------------"); 3088 tty->cr(); 3089 } 3090 for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { 3091 AbstractCompiler* comp = _compilers[i]; 3092 if (comp != nullptr) { 3093 print_times(comp->name(), comp->stats()); 3094 } 3095 } 3096 if (_aot_stats._standard._count > 0) { 3097 print_times("SC", &_aot_stats); 3098 } 3099 if (aggregate) { 3100 tty->cr(); 3101 tty->print_cr("Individual compilation Tier times (for compiled methods only)"); 3102 
tty->print_cr("------------------------------------------------"); 3103 tty->cr(); 3104 } 3105 char tier_name[256]; 3106 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 3107 CompilerStatistics* stats = &_stats_per_level[tier-1]; 3108 os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier); 3109 print_times(tier_name, stats); 3110 } 3111 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3112 CompilerStatistics* stats = &_aot_stats_per_level[tier-1]; 3113 if (stats->_standard._bytes > 0) { 3114 os::snprintf_checked(tier_name, sizeof(tier_name), "AOT Code T%d", tier); 3115 print_times(tier_name, stats); 3116 } 3117 } 3118 } 3119 3120 if (!aggregate) { 3121 return; 3122 } 3123 3124 elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation; 3125 elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation; 3126 elapsedTimer total_compilation = CompileBroker::_t_total_compilation; 3127 3128 uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled; 3129 uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled; 3130 3131 uint standard_compile_count = CompileBroker::_total_standard_compile_count; 3132 uint osr_compile_count = CompileBroker::_total_osr_compile_count; 3133 uint total_compile_count = CompileBroker::_total_compile_count; 3134 uint total_bailout_count = CompileBroker::_total_bailout_count; 3135 uint total_invalidated_count = CompileBroker::_total_invalidated_count; 3136 3137 uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size; 3138 uint nmethods_size = CompileBroker::_sum_nmethod_size; 3139 3140 tty->cr(); 3141 tty->print_cr("Accumulated compiler times"); 3142 tty->print_cr("----------------------------------------------------------"); 3143 //0000000000111111111122222222223333333333444444444455555555556666666666 3144 //0123456789012345678901234567890123456789012345678901234567890123456789 3145 tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds()); 3146 tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s", 3147 standard_compilation.seconds(), 3148 standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count); 3149 tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s", 3150 CompileBroker::_t_bailedout_compilation.seconds(), 3151 total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count); 3152 tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s", 3153 osr_compilation.seconds(), 3154 osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count); 3155 tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s", 3156 CompileBroker::_t_invalidated_compilation.seconds(), 3157 total_invalidated_count == 0 ? 

  if (AOTCodeCaching) { // Check flags because AOT code cache could be closed already
    tty->cr();
    AOTCodeCache::print_timers_on(tty);
  }
  AbstractCompiler *comp = compiler(CompLevel_simple);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = compiler(CompLevel_full_optimization);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCICompiler *jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null());
    if (jvmci_comp != nullptr && jvmci_comp != comp) {
      tty->cr();
      jvmci_comp->print_timers();
    }
  }
#endif

  tty->cr();
  tty->print_cr("  Total compiled methods    : %8u methods", total_compile_count);
  tty->print_cr("    Standard compilation    : %8u methods", standard_compile_count);
  tty->print_cr("    On stack replacement    : %8u methods", osr_compile_count);
  uint tcb = osr_bytes_compiled + standard_bytes_compiled;
  tty->print_cr("  Total compiled bytecodes  : %8u bytes", tcb);
  tty->print_cr("    Standard compilation    : %8u bytes", standard_bytes_compiled);
  tty->print_cr("    On stack replacement    : %8u bytes", osr_bytes_compiled);
  double tcs = total_compilation.seconds();
  uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs);
  tty->print_cr("  Average compilation speed : %8u bytes/s", bps);
  tty->cr();
  tty->print_cr("  nmethod code size         : %8u bytes", nmethods_code_size);
  tty->print_cr("  nmethod total size        : %8u bytes", nmethods_size);
}

// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
  if (out == nullptr) out = tty;
  out->cr();
  out->print_cr("======================");
  out->print_cr("   General JIT info   ");
  out->print_cr("======================");
  out->cr();
  out->print_cr("            JIT is : %7s", should_compile_new_jobs() ? "on" : "off");
  out->print_cr("  Compiler threads : %7d", (int)CICompilerCount);
  out->cr();
  out->print_cr("CodeCache overview");
  out->print_cr("--------------------------------------------------------");
  out->cr();
  out->print_cr("         Reserved size : %7zu KB", CodeCache::max_capacity() / K);
  out->print_cr("        Committed size : %7zu KB", CodeCache::capacity() / K);
  out->print_cr("  Unallocated capacity : %7zu KB", CodeCache::unallocated_capacity() / K);
  out->cr();
}

// Note: tty_lock must not be held upon entry to this function.
//       Print functions called from herein do "micro-locking" on tty_lock.
//       That's a tradeoff which keeps together important blocks of output.
//       At the same time, continuous tty_lock hold time is kept in check,
//       preventing concurrently printing threads from stalling a long time.
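//
// The 'function' argument selects which report(s) to emit: "all" runs every analysis,
// "aggregate"/"analyze" (re)build the aggregated CodeHeap state, and "UsedSpace",
// "FreeSpace", "MethodCount", "MethodSpace", "MethodAge", "MethodNames", and "discard"
// print or drop individual parts of it. 'granularity' is passed on to
// CodeCache::aggregate() and controls how finely the CodeHeap is segmented for analysis.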
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
  TimeStamp ts_total;
  TimeStamp ts_global;
  TimeStamp ts;

  bool allFun = !strcmp(function, "all");
  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
  bool methodCount = !strcmp(function, "MethodCount") || allFun;
  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
  bool methodAge = !strcmp(function, "MethodAge") || allFun;
  bool methodNames = !strcmp(function, "MethodNames") || allFun;
  bool discard = !strcmp(function, "discard") || allFun;

  if (out == nullptr) {
    out = tty;
  }

  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
    out->cr();
    return;
  }

  ts_total.update(); // record starting point

  if (aggregate) {
    print_info(out);
  }

  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
  // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
  // When we request individual parts of the analysis via the jcmd interface, it is possible
  // that in between another thread (another jcmd user or the vm running into CodeCache OOM)
  // updated the aggregated data. We will then see a modified, but again consistent, view
  // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
  // a lock across user interaction.

  // We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock.
  // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
  // leading to an unnecessarily long hold time of the other locks we acquired before.
  ts.update(); // record starting point
  MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
  out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());

  // Holding the CodeCache_lock protects from concurrent alterations of the CodeCache.
  // Unfortunately, such protection is not sufficient:
  // When a new nmethod is created via ciEnv::register_method(), the
  // Compile_lock is taken first. After some initializations,
  // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
  // immediately (after finalizing the oop references). To lock out concurrent
  // modifiers, we have to grab both locks as well in the described sequence.
  //
  // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
  // for the entire duration of aggregation and printing. That makes sure we see
  // a consistent picture and do not run into issues caused by concurrent alterations.
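  // Decide which locks still have to be acquired: at a safepoint, or if this thread
  // already owns a lock, it must not be taken again. For "all", Compile_lock and
  // CodeCache_lock are held globally (across aggregation and printing); for the
  // individual functions they are only held around the aggregation step below.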
  bool should_take_Compile_lock   = !SafepointSynchronize::is_at_safepoint() &&
                                    !Compile_lock->owned_by_self();
  bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
                                    !CodeCache_lock->owned_by_self();
  bool take_global_lock_1   =  allFun && should_take_Compile_lock;
  bool take_global_lock_2   =  allFun && should_take_CodeCache_lock;
  bool take_function_lock_1 = !allFun && should_take_Compile_lock;
  bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
  bool take_global_locks   = take_global_lock_1 || take_global_lock_2;
  bool take_function_locks = take_function_lock_1 || take_function_lock_2;

  ts_global.update(); // record starting point

  ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
  ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
    ts_global.update(); // record starting point
  }

  if (aggregate) {
    ts.update(); // record starting point
    ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag);
    ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
    }

    ts.update(); // record starting point
    CodeCache::aggregate(out, granularity);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
    }
  }

  if (usedSpace) CodeCache::print_usedSpace(out);
  if (freeSpace) CodeCache::print_freeSpace(out);
  if (methodCount) CodeCache::print_count(out);
  if (methodSpace) CodeCache::print_space(out);
  if (methodAge) CodeCache::print_age(out);
  if (methodNames) {
    if (allFun) {
      // print_names() can only be used safely if the locks have been continuously held
      // since aggregation begin. That is true only for function "all".
      CodeCache::print_names(out);
    } else {
      out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
    }
  }
  if (discard) CodeCache::discard(out);

  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
  }
  out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
}