/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLinkedClassBulkLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/aotCodeCache.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerEvent.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "gc/shared/memAllocator.hpp"
#include "interpreter/linkResolver.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
#include "runtime/atomic.hpp"
#include "runtime/escapeBarrier.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "services/management.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/nonblockingQueue.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciEnv.hpp"
#include "jvmci/jvmciRuntime.hpp"
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
  {                                                                      \
    Symbol* klass_name = (method)->klass_name();                         \
    Symbol* name = (method)->name();                                     \
    Symbol* signature = (method)->signature();                           \
    HOTSPOT_METHOD_COMPILE_BEGIN(                                        \
      (char *) comp_name, strlen(comp_name),                             \
      (char *) klass_name->bytes(), klass_name->utf8_length(),           \
      (char *) name->bytes(), name->utf8_length(),                       \
      (char *) signature->bytes(), signature->utf8_length());            \
  }

#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)      \
  {                                                                      \
    Symbol* klass_name = (method)->klass_name();                         \
    Symbol* name = (method)->name();                                     \
    Symbol* signature = (method)->signature();                           \
    HOTSPOT_METHOD_COMPILE_END(                                          \
      (char *) comp_name, strlen(comp_name),                             \
      (char *) klass_name->bytes(), klass_name->utf8_length(),           \
      (char *) name->bytes(), name->utf8_length(),                       \
      (char *) signature->bytes(), signature->utf8_length(), (success)); \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)

#endif // ndef DTRACE_ENABLED

bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false;
volatile int CompileBroker::_print_compilation_warning = 0;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;

// The installed compiler(s)
AbstractCompiler* CompileBroker::_compilers[2];

// The maximum numbers of compiler threads to be determined during startup.
int CompileBroker::_c1_count = 0;
int CompileBroker::_c2_count = 0;
int CompileBroker::_ac_count = 0;

// An array of compiler names as Java String objects
jobject* CompileBroker::_compiler1_objects = nullptr;
jobject* CompileBroker::_compiler2_objects = nullptr;
jobject* CompileBroker::_ac_objects = nullptr;

CompileLog** CompileBroker::_compiler1_logs = nullptr;
CompileLog** CompileBroker::_compiler2_logs = nullptr;
CompileLog** CompileBroker::_ac_logs = nullptr;

// These counters are used to assign a unique ID to each compilation.
volatile jint CompileBroker::_compilation_id = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;
volatile jint CompileBroker::_native_compilation_id = 0;

// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = nullptr;
PerfCounter* CompileBroker::_perf_osr_compilation = nullptr;
PerfCounter* CompileBroker::_perf_standard_compilation = nullptr;

PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr;
PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr;
PerfCounter* CompileBroker::_perf_total_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr;

PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr;

PerfStringVariable* CompileBroker::_perf_last_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_type = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_size = nullptr;
PerfVariable* CompileBroker::_perf_last_failed_type = nullptr;
PerfVariable* CompileBroker::_perf_last_invalidated_type = nullptr;

// Timers and counters for generating statistics
elapsedTimer CompileBroker::_t_total_compilation;
elapsedTimer CompileBroker::_t_osr_compilation;
elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;

uint CompileBroker::_total_bailout_count = 0;
uint CompileBroker::_total_invalidated_count = 0;
uint CompileBroker::_total_not_entrant_count = 0;
uint CompileBroker::_total_compile_count = 0;
uint CompileBroker::_total_osr_compile_count = 0;
uint CompileBroker::_total_standard_compile_count = 0;
uint CompileBroker::_total_compiler_stopped_count = 0;
uint CompileBroker::_total_compiler_restarted_count = 0;

uint CompileBroker::_sum_osr_bytes_compiled = 0;
uint CompileBroker::_sum_standard_bytes_compiled = 0;
uint CompileBroker::_sum_nmethod_size = 0;
uint CompileBroker::_sum_nmethod_code_size = 0;

jlong CompileBroker::_peak_compilation_time = 0;

CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
CompilerStatistics CompileBroker::_aot_stats;
CompilerStatistics CompileBroker::_aot_stats_per_level[CompLevel_full_optimization + 1];

CompileQueue* CompileBroker::_c2_compile_queue = nullptr;
CompileQueue* CompileBroker::_c1_compile_queue = nullptr;
CompileQueue* CompileBroker::_ac1_compile_queue = nullptr;
CompileQueue* CompileBroker::_ac2_compile_queue = nullptr;

bool compileBroker_init() {
  if (LogEvents) {
    CompilationLog::init();
  }

  // init directives stack, adding default directive
  DirectivesStack::init();

  if (DirectivesParser::has_file()) {
    return DirectivesParser::parse_from_flag();
  } else if (CompilerDirectivesPrint) {
    // Print default directive even when no other was added
    DirectivesStack::print(tty);
  }

  return true;
}

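// CompileTaskWrapper is an RAII helper for compiler threads: the constructor
// attaches the given CompileTask to the current CompilerThread and logs the
// start of the task; the destructor logs completion, detaches the task, and
// either notifies the waiter (blocking task) or deletes the task itself
// (non-blocking task).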
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
  CompilerThread* thread = CompilerThread::current();
  thread->set_task(task);
  CompileLog* log = thread->log();
  if (log != nullptr && !task->is_unloaded()) task->log_task_start(log);
}

CompileTaskWrapper::~CompileTaskWrapper() {
  CompilerThread* thread = CompilerThread::current();
  CompileTask* task = thread->task();
  CompileLog* log = thread->log();
  AbstractCompiler* comp = thread->compiler();
  if (log != nullptr && !task->is_unloaded()) task->log_task_done(log);
  thread->set_task(nullptr);
  thread->set_env(nullptr);
  if (task->is_blocking()) {
    bool free_task = false;
    {
      MutexLocker notifier(thread, CompileTaskWait_lock);
      task->mark_complete();
#if INCLUDE_JVMCI
      if (comp->is_jvmci()) {
        if (!task->has_waiter()) {
          // The waiting thread timed out and thus did not delete the task.
          free_task = true;
        }
        task->set_blocking_jvmci_compile_state(nullptr);
      }
#endif
      if (!free_task) {
        // Notify the waiting thread that the compilation has completed
        // so that it can free the task.
        CompileTaskWait_lock->notify_all();
      }
    }
    if (free_task) {
      // The task can only be deleted once the task lock is released.
      delete task;
    }
  } else {
    task->mark_complete();

    // By convention, the compiling thread is responsible for deleting
    // a non-blocking CompileTask.
    delete task;
  }
}

/**
 * Check if a CompilerThread can be removed and update count if requested.
 */
bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
  assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
  if (!ReduceNumberOfCompilerThreads) return false;

  if (RecompilationPolicy::have_recompilation_work()) return false;

  AbstractCompiler *compiler = ct->compiler();
  int compiler_count = compiler->num_compiler_threads();
  bool c1 = compiler->is_c1();

  // Keep at least 1 compiler thread of each type.
  if (compiler_count < 2) return false;

  // Keep thread alive for at least some time.
  if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;

#if INCLUDE_JVMCI
  if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
    // Handles for JVMCI thread objects may get released concurrently.
    if (do_it) {
      assert(CompileThread_lock->owner() == ct, "must be holding lock");
    } else {
      // Skip check if it's the last thread and let caller check again.
      return true;
    }
  }
#endif

  // We only allow the last compiler thread of each type to get removed.
  jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
                             : compiler2_object(compiler_count - 1);
  if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
    if (do_it) {
      assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
      compiler->set_num_compiler_threads(compiler_count - 1);
#if INCLUDE_JVMCI
      if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
        // Old j.l.Thread object can die when no longer referenced elsewhere.
        JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
        _compiler2_objects[compiler_count - 1] = nullptr;
      }
#endif
    }
    return true;
  }
  return false;
}

/**
 * Add a CompileTask to a CompileQueue.
 */
void CompileQueue::add(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");

  task->set_next(nullptr);
  task->set_prev(nullptr);

  if (_last == nullptr) {
    // The compile queue is empty.
    assert(_first == nullptr, "queue is empty");
    _first = task;
    _last = task;
  } else {
    // Append the task to the queue.
    assert(_last->next() == nullptr, "not last");
    _last->set_next(task);
    task->set_prev(_last);
    _last = task;
  }
  ++_size;
  ++_total_added;
  if (_size > _peak_size) {
    _peak_size = _size;
  }

  // Mark the method as being in the compile queue.
  task->method()->set_queued_for_compilation();

  task->mark_queued(os::elapsed_counter());

  if (CIPrintCompileQueue) {
    print_tty();
  }

  if (LogCompilation && xtty != nullptr) {
    task->log_task_queued();
  }

  if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) {
    CompileTrainingData* ctd = CompileTrainingData::make(task);
    if (ctd != nullptr) {
      task->set_training_data(ctd);
    }
  }

  // Notify CompilerThreads that a task is available.
  _lock->notify_all();
}

void CompileQueue::add_pending(CompileTask* task) {
  assert(_lock->owned_by_self() == false, "must NOT own lock");
  assert(UseLockFreeCompileQueues, "");
  task->method()->set_queued_for_compilation();
  _queue.push(*task);
  // FIXME: additional coordination needed? e.g., is it possible for compiler thread to block w/o processing pending tasks?
  if (is_empty()) {
    MutexLocker ml(_lock);
    _lock->notify_all();
  }
}

static bool process_pending(CompileTask* task) {
  // guarantee(task->method()->queued_for_compilation(), "");
  if (task->is_unloaded()) {
    return true; // unloaded
  }
  task->method()->set_queued_for_compilation(); // FIXME
  if (task->method()->pending_queue_processed()) {
    return true; // already queued
  }
  // Mark the method as being in the compile queue.
  task->method()->set_pending_queue_processed();
  if (CompileBroker::compilation_is_complete(task->method(), task->osr_bci(), task->comp_level(),
                                             task->requires_online_compilation(), task->compile_reason())) {
    return true; // already compiled
  }
  return false; // active
}

void CompileQueue::transfer_pending() {
  assert(_lock->owned_by_self(), "must own lock");

  CompileTask* task;
  while ((task = _queue.pop()) != nullptr) {
    bool is_stale = process_pending(task);
    if (is_stale) {
      task->set_next(_first_stale);
      task->set_prev(nullptr);
      _first_stale = task;
    } else {
      add(task);
    }
  }
}

/**
 * Empties compilation queue by deleting all compilation tasks.
 * Furthermore, the method wakes up all threads that are waiting
 * on a compilation task to finish. This can happen if background
 * compilation is disabled.
 */
void CompileQueue::delete_all() {
  MutexLocker mu(_lock);
  transfer_pending();

  CompileTask* current = _first;

  // Iterate over all tasks in the compile queue
  while (current != nullptr) {
    // Remember the next task before the current one is (possibly) deleted.
    CompileTask* next = current->next();
    if (!current->is_blocking()) {
      // Non-blocking task. No one is waiting for it, delete it now.
      delete current;
    } else {
      // Blocking task. By convention, it is the waiter's responsibility
      // to delete the task. We cannot delete it here, because we do not
      // coordinate with waiters. We will notify the waiters later.
    }
    current = next;
  }
  _first = nullptr;
  _last = nullptr;

  // Wake up all blocking task waiters to deal with remaining blocking
  // tasks. This is not a performance sensitive path, so we do this
  // unconditionally to simplify coding/testing.
  {
    MonitorLocker ml(Thread::current(), CompileTaskWait_lock);
    ml.notify_all();
  }

  // Wake up all threads that block on the queue.
  _lock->notify_all();
}

/**
 * Get the next CompileTask from a CompileQueue
 */
CompileTask* CompileQueue::get(CompilerThread* thread) {
  // save methods from RedefineClasses across safepoint
  // across compile queue lock below.
  methodHandle save_method;

  MonitorLocker locker(_lock);
  transfer_pending();

  RecompilationPolicy::sample_load_average();

  // If _first is null we have no more compile jobs. There are two reasons for
  // having no compile jobs: First, we compiled everything we wanted. Second,
  // we ran out of code cache so compilation has been disabled. In the latter
  // case we perform code cache sweeps to free memory such that we can re-enable
  // compilation.
  while (_first == nullptr) {
    // Exit loop if compilation is disabled forever
    if (CompileBroker::is_compilation_disabled_forever()) {
      return nullptr;
    }

    AbstractCompiler* compiler = thread->compiler();
    guarantee(compiler != nullptr, "Compiler object must exist");
    compiler->on_empty_queue(this, thread);
    if (_first != nullptr) {
      // The call to on_empty_queue may have temporarily unlocked the MCQ lock
      // so check again whether any tasks were added to the queue.
      break;
    }

    // If we have added stale tasks, there might be waiters that want
    // the notification these tasks have failed. Normally, this would
    // be done by a compiler thread that would perform the purge at
    // the end of some compilation. But, if compile queue is empty,
    // there is no guarantee compilers would run and do the purge.
    // Do the purge here and now to unblock the waiters.
    // Perform this until we run out of stale tasks.
    while (_first_stale != nullptr) {
      purge_stale_tasks();
    }
    if (_first != nullptr) {
      // Purge stale tasks may have transferred some new tasks,
      // so check again.
      break;
    }

    // If there are no compilation tasks and we can compile new jobs
    // (i.e., there is enough free space in the code cache) there is
    // no need to invoke the GC.
    // We need a timed wait here, since compiler threads can exit if compilation
    // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
    // is not critical and we do not want idle compiler threads to wake up too often.
    locker.wait(5*1000);

    transfer_pending(); // reacquired lock

    if (RecompilationPolicy::have_recompilation_work()) return nullptr;

    if (UseDynamicNumberOfCompilerThreads && _first == nullptr) {
      // Still nothing to compile. Give caller a chance to stop this thread.
      if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr;
    }
  }

  if (CompileBroker::is_compilation_disabled_forever()) {
    return nullptr;
  }

  CompileTask* task;
  {
    NoSafepointVerifier nsv;
    task = CompilationPolicy::select_task(this, thread);
    if (task != nullptr) {
      task = task->select_for_compilation();
    }
  }

  if (task != nullptr) {
    // Save method pointers across unlock safepoint. The task is removed from
    // the compilation queue, which is walked during RedefineClasses.
    Thread* thread = Thread::current();
    save_method = methodHandle(thread, task->method());

    remove(task);
  }
  purge_stale_tasks(); // may temporarily release MCQ lock
  return task;
}

// Clean & deallocate stale compile tasks.
// Temporarily releases MethodCompileQueue lock.
void CompileQueue::purge_stale_tasks() {
  assert(_lock->owned_by_self(), "must own lock");
  if (_first_stale != nullptr) {
    // Stale tasks are purged when MCQ lock is released,
    // but _first_stale updates are protected by MCQ lock.
    // Once task processing starts and MCQ lock is released,
    // other compiler threads can reuse _first_stale.
    CompileTask* head = _first_stale;
    _first_stale = nullptr;
    {
      MutexUnlocker ul(_lock);
      for (CompileTask* task = head; task != nullptr; ) {
        CompileTask* next_task = task->next();
        CompileTaskWrapper ctw(task); // Frees the task
        task->set_failure_reason("stale task");
        task = next_task;
      }
    }
    transfer_pending(); // transfer pending after reacquiring MCQ lock
  }
}

void CompileQueue::remove(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  if (task->prev() != nullptr) {
    task->prev()->set_next(task->next());
  } else {
    // task is the first element
    assert(task == _first, "Sanity");
    _first = task->next();
  }

  if (task->next() != nullptr) {
    task->next()->set_prev(task->prev());
  } else {
    // task is the last element
    assert(task == _last, "Sanity");
    _last = task->prev();
  }
  --_size;
  ++_total_removed;
}

void CompileQueue::remove_and_mark_stale(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  remove(task);

  // Enqueue the task for reclamation (should be done outside MCQ lock)
  task->set_next(_first_stale);
  task->set_prev(nullptr);
  _first_stale = task;
}

// methods in the compile queue need to be marked as used on the stack
// so that they don't get reclaimed by Redefine Classes
void CompileQueue::mark_on_stack() {
  for (CompileTask* task = _first; task != nullptr; task = task->next()) {
    task->mark_on_stack();
  }
  for (CompileTask* task = _queue.first(); !_queue.is_end(task); task = task->next()) {
    assert(task != nullptr, "");
    task->mark_on_stack();
  }
}


CompileQueue* CompileBroker::compile_queue(int comp_level, bool is_aot) {
  if (is_c2_compile(comp_level)) return ((is_aot && (_ac_count > 0)) ? _ac2_compile_queue : _c2_compile_queue);
  if (is_c1_compile(comp_level)) return ((is_aot && (_ac_count > 0)) ? _ac1_compile_queue : _c1_compile_queue);
  return nullptr;
}

CompileQueue* CompileBroker::c1_compile_queue() {
  return _c1_compile_queue;
}

CompileQueue* CompileBroker::c2_compile_queue() {
  return _c2_compile_queue;
}

void CompileBroker::print_compile_queues(outputStream* st) {
  st->print_cr("Current compiles: ");

  char buf[2000];
  int buflen = sizeof(buf);
  Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);

  st->cr();
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->print(st);
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->print(st);
  }
  if (_ac1_compile_queue != nullptr) {
    _ac1_compile_queue->print(st);
  }
  if (_ac2_compile_queue != nullptr) {
    _ac2_compile_queue->print(st);
  }
}

void CompileQueue::print(outputStream* st) {
  assert_locked_or_safepoint(_lock);
  st->print_cr("%s:", name());
  CompileTask* task = _first;
  if (task == nullptr) {
    st->print_cr("Empty");
  } else {
    while (task != nullptr) {
      task->print(st, nullptr, true, true);
      task = task->next();
    }
  }
  st->cr();
}

void CompileQueue::print_tty() {
  stringStream ss;
  // Dump the compile queue into a buffer before locking the tty
  print(&ss);
  {
    ttyLocker ttyl;
    tty->print("%s", ss.freeze());
  }
}

CompilerCounters::CompilerCounters() {
  _current_method[0] = '\0';
  _compile_type = CompileBroker::no_compile;
}

#if INCLUDE_JFR && COMPILER2_OR_JVMCI
// It appends new compiler phase names to growable array phase_names(a new CompilerPhaseType mapping
// in compiler/compilerEvent.cpp) and registers it with its serializer.
//
// c2 uses explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if c2 is used, it should be always registered first.
// This function is called during vm initialization.
static void register_jfr_phasetype_serializer(CompilerType compiler_type) {
  ResourceMark rm;
  static bool first_registration = true;
  if (compiler_type == compiler_jvmci) {
    CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false);
    first_registration = false;
#ifdef COMPILER2
  } else if (compiler_type == compiler_c2) {
    assert(first_registration, "invariant"); // c2 must be registered first.
    for (int i = 0; i < PHASE_NUM_TYPES; i++) {
      const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i);
      CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false);
    }
    first_registration = false;
#endif // COMPILER2
  }
}
#endif // INCLUDE_JFR && COMPILER2_OR_JVMCI

// ------------------------------------------------------------------
// CompileBroker::compilation_init
//
// Initialize the Compilation object
void CompileBroker::compilation_init(JavaThread* THREAD) {
  // No need to initialize compilation system if we do not use it.
  if (!UseCompiler) {
    return;
  }
  // Set the interface to the current compiler(s).
  _c1_count = CompilationPolicy::c1_count();
  _c2_count = CompilationPolicy::c2_count();
  _ac_count = CompilationPolicy::ac_count();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // This is creating a JVMCICompiler singleton.
    JVMCICompiler* jvmci = new JVMCICompiler();

    if (UseJVMCICompiler) {
      _compilers[1] = jvmci;
      if (FLAG_IS_DEFAULT(JVMCIThreads)) {
        if (BootstrapJVMCI) {
          // JVMCI will bootstrap so give it more threads
          _c2_count = MIN2(32, os::active_processor_count());
        }
      } else {
        _c2_count = JVMCIThreads;
      }
      if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
      } else {
#ifdef COMPILER1
        _c1_count = JVMCIHostThreads;
#endif // COMPILER1
      }
    }
  }
#endif // INCLUDE_JVMCI

#ifdef COMPILER1
  if (_c1_count > 0) {
    _compilers[0] = new Compiler();
  }
#endif // COMPILER1

#ifdef COMPILER2
  if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
    if (_c2_count > 0) {
      _compilers[1] = new C2Compiler();
      // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit.
      // idToPhase mapping for c2 is in opto/phasetype.hpp
      JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);)
    }
  }
#endif // COMPILER2

#if INCLUDE_JVMCI
  // Register after c2 registration.
  // JVMCI CompilerPhaseType idToPhase mapping is dynamic.
  if (EnableJVMCI) {
    JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);)
  }
#endif // INCLUDE_JVMCI

  if (CompilerOracle::should_collect_memstat()) {
    CompilationMemoryStatistic::initialize();
  }

  // Start the compiler thread(s)
  init_compiler_threads();
  // totalTime performance counter is always created as it is required
  // by the implementation of java.lang.management.CompilationMXBean.
  {
    // Ensure OOM leads to vm_exit_during_initialization.
    EXCEPTION_MARK;
    _perf_total_compilation =
        PerfDataManager::create_counter(JAVA_CI, "totalTime",
                                        PerfData::U_Ticks, CHECK);
  }

  if (UsePerfData) {

    EXCEPTION_MARK;

    // create the jvmstat performance counters
    _perf_osr_compilation =
        PerfDataManager::create_counter(SUN_CI, "osrTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_standard_compilation =
        PerfDataManager::create_counter(SUN_CI, "standardTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_total_bailout_count =
        PerfDataManager::create_counter(SUN_CI, "totalBailouts",
                                        PerfData::U_Events, CHECK);

    _perf_total_invalidated_count =
        PerfDataManager::create_counter(SUN_CI, "totalInvalidates",
                                        PerfData::U_Events, CHECK);

    _perf_total_compile_count =
        PerfDataManager::create_counter(SUN_CI, "totalCompiles",
                                        PerfData::U_Events, CHECK);
    _perf_total_osr_compile_count =
        PerfDataManager::create_counter(SUN_CI, "osrCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_total_standard_compile_count =
        PerfDataManager::create_counter(SUN_CI, "standardCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_sum_osr_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "osrBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_standard_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "standardBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_code_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_last_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_failed_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_invalidated_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_compile_type =
        PerfDataManager::create_variable(SUN_CI, "lastType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_compile_size =
        PerfDataManager::create_variable(SUN_CI, "lastSize",
                                         PerfData::U_Bytes,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);


    _perf_last_failed_type =
        PerfDataManager::create_variable(SUN_CI, "lastFailedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_invalidated_type =
        PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);
  }

  log_info(aot, codecache, init)("CompileBroker is initialized");
  _initialized = true;
}

Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
  Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK_NH);
  return thread_oop;
}

void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) {
  CompilationPolicy::replay_training_at_init_loop(thread);
}

#if defined(ASSERT) && COMPILER2_OR_JVMCI
// Entry for DeoptimizeObjectsALotThread. The threads are started in
// CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled
void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
  DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread);
  bool enter_single_loop;
  {
    MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag);
    static int single_thread_count = 0;
    enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle;
  }
  if (enter_single_loop) {
    dt->deoptimize_objects_alot_loop_single();
  } else {
    dt->deoptimize_objects_alot_loop_all();
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets a single thread which is selected round robin.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() {
  HandleMark hm(this);
  while (true) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) {
      { // Begin new scope for escape barrier
        HandleMarkCleaner hmc(this);
        ResourceMark rm(this);
        EscapeBarrier eb(true, this, deoptee_thread);
        eb.deoptimize_objects(100);
      }
      // Now sleep after the escape barriers destructor resumed deoptee_thread.
      sleep(DeoptimizeObjectsALotInterval);
    }
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets all java threads in the vm at once.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {
  HandleMark hm(this);
  while (true) {
    { // Begin new scope for escape barrier
      HandleMarkCleaner hmc(this);
      ResourceMark rm(this);
      EscapeBarrier eb(true, this);
      eb.deoptimize_objects_all_threads();
    }
    // Now sleep after the escape barriers destructor resumed the java threads.
    sleep(DeoptimizeObjectsALotInterval);
  }
}
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI


JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
  Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));

  if (java_lang_Thread::thread(thread_oop()) != nullptr) {
    assert(type == compiler_t, "should only happen with reused compiler threads");
    // The compiler thread hasn't actually exited yet so don't try to reuse it
    return nullptr;
  }

  JavaThread* new_thread = nullptr;
  switch (type) {
    case compiler_t:
      assert(comp != nullptr, "Compiler instance missing.");
      if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
        CompilerCounters* counters = new CompilerCounters();
        new_thread = new CompilerThread(queue, counters);
      }
      break;
#if defined(ASSERT) && COMPILER2_OR_JVMCI
    case deoptimizer_t:
      new_thread = new DeoptimizeObjectsALotThread();
      break;
#endif // ASSERT
    case training_replay_t:
      new_thread = new TrainingReplayThread();
      break;
    default:
      ShouldNotReachHere();
  }

  // At this point the new CompilerThread data-races with this startup
  // thread (which is the main thread and NOT the VM thread).
  // This means Java bytecodes being executed at startup can
  // queue compile jobs which will run at whatever default priority the
  // newly created CompilerThread runs at.


  // At this point it may be possible that no osthread was created for the
  // JavaThread due to lack of resources. We will handle that failure below.
  // Also check new_thread so that static analysis is happy.
  if (new_thread != nullptr && new_thread->osthread() != nullptr) {

    if (type == compiler_t) {
      CompilerThread::cast(new_thread)->set_compiler(comp);
    }

    // Note that we cannot call os::set_priority because it expects Java
    // priorities and we are *explicitly* using OS priorities so that it's
    // possible to set the compiler thread priority higher than any Java
    // thread.

    int native_prio = CompilerThreadPriority;
    if (native_prio == -1) {
      if (UseCriticalCompilerThreadPriority) {
        native_prio = os::java_to_os_priority[CriticalPriority];
      } else {
        native_prio = os::java_to_os_priority[NearMaxPriority];
      }
    }
    os::set_native_priority(new_thread, native_prio);

    // Note that this only sets the JavaThread _priority field, which by
    // definition is limited to Java priorities and not OS priorities.
    JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority);

  } else { // osthread initialization failure
    if (UseDynamicNumberOfCompilerThreads && type == compiler_t
        && comp->num_compiler_threads() > 0) {
      // The new thread is not known to Thread-SMR yet so we can just delete.
      delete new_thread;
      return nullptr;
    } else {
      vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                    os::native_thread_creation_failed_msg());
    }
  }

  os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)

  return new_thread;
}

static bool trace_compiler_threads() {
  LogTarget(Debug, jit, thread) lt;
  return TraceCompilerThreads || lt.is_enabled();
}

static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) {
  char name_buffer[256];
  os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i);
  Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL);
  return JNIHandles::make_global(thread_oop);
}

static void print_compiler_threads(stringStream& msg) {
  if (TraceCompilerThreads) {
    tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string());
  }
  LogTarget(Debug, jit, thread) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s", msg.as_string());
  }
}

static void print_compiler_thread(JavaThread *ct) {
  if (trace_compiler_threads()) {
    ResourceMark rm;
    ThreadsListHandle tlh; // name() depends on the TLH.
    assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
    stringStream msg;
    msg.print("Added initial compiler thread %s", ct->name());
    print_compiler_threads(msg);
  }
}

void CompileBroker::init_compiler_threads() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
#if !defined(ZERO)
  assert(_c2_count > 0 || _c1_count > 0, "No compilers?");
#endif // !ZERO
  // Initialize the compilation queue
  if (_c2_count > 0) {
    const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
    _c2_compile_queue = new CompileQueue(name, MethodCompileQueueC2_lock);
    _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler);
    _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler);
  }
  if (_c1_count > 0) {
    _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueueC1_lock);
    _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler);
    _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler);
  }

  if (_ac_count > 0) {
    if (_c1_count > 0) { // C1 is present
      _ac1_compile_queue = new CompileQueue("C1 AOT code compile queue", MethodCompileQueueSC1_lock);
    }
    if (_c2_count > 0) { // C2 is present
      _ac2_compile_queue = new CompileQueue("C2 AOT code compile queue", MethodCompileQueueSC2_lock);
    }
    _ac_objects = NEW_C_HEAP_ARRAY(jobject, _ac_count, mtCompiler);
    _ac_logs = NEW_C_HEAP_ARRAY(CompileLog*, _ac_count, mtCompiler);
  }
  char name_buffer[256];

  for (int i = 0; i < _c2_count; i++) {
    // Create a name for our thread.
    jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK);
    _compiler2_objects[i] = thread_handle;
    _compiler2_logs[i] = nullptr;

    if (!UseDynamicNumberOfCompilerThreads || i == 0) {
      JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      _compilers[1]->set_num_compiler_threads(i + 1);
      print_compiler_thread(ct);
    }
  }

  for (int i = 0; i < _c1_count; i++) {
    // Create a name for our thread.
    jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK);
    _compiler1_objects[i] = thread_handle;
    _compiler1_logs[i] = nullptr;

    if (!UseDynamicNumberOfCompilerThreads || i == 0) {
      JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      _compilers[0]->set_num_compiler_threads(i + 1);
      print_compiler_thread(ct);
    }
  }

  if (_ac_count > 0) {
    int i = 0;
    if (_c1_count > 0) { // C1 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 1);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _ac_objects[i] = thread_handle;
      _ac_logs[i] = nullptr;
      i++;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _ac1_compile_queue, _compilers[0], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
    if (_c2_count > 0) { // C2 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 2);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _ac_objects[i] = thread_handle;
      _ac_logs[i] = nullptr;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _ac2_compile_queue, _compilers[1], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
  }

  if (UsePerfData) {
    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK);
  }

#if defined(ASSERT) && COMPILER2_OR_JVMCI
  if (DeoptimizeObjectsALot) {
    // Initialize and start the object deoptimizer threads
    const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll;
    for (int count = 0; count < total_count; count++) {
      Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK);
      jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
      make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD);
    }
  }
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI
}

void CompileBroker::init_training_replay() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
  if (TrainingData::have_data()) {
    Handle thread_oop = create_thread_oop("Training replay thread", CHECK);
    jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
    make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD);
  }
}

void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {

  int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0;
  const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4;

  // Quick check if we already have enough compiler threads without taking the lock.
  // Numbers may change concurrently, so we read them again after we have the lock.
  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread);
  }
  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread);
  }
  if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return;

  // Now, we do the more expensive operations.
  julong free_memory = os::free_memory();
  // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
  size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
         available_cc_p  = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);

  // Only attempt to start additional threads if the lock is free.
  if (!CompileThread_lock->try_lock()) return;

  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN4(_c2_count,
                        _c2_compile_queue->size() / c2_tasks_per_thread,
                        (int)(free_memory / (200*M)),
                        (int)(available_cc_np / (128*K)));

    for (int i = old_c2_count; i < new_c2_count; i++) {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) {
        // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their
        // existence is completely hidden from the rest of the VM (and those compiler threads can't
        // call Java code to do the creation anyway).
        //
        // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we
        // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads. For
        // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary
        // coupling with Java.
        if (!THREAD->can_call_java()) break;
        char name_buffer[256];
        os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i);
        Handle thread_oop;
        {
          // We have to give up the lock temporarily for the Java calls.
          MutexUnlocker mu(CompileThread_lock);
          thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD);
        }
        if (HAS_PENDING_EXCEPTION) {
          if (trace_compiler_threads()) {
            ResourceMark rm;
            stringStream msg;
            msg.print_cr("JVMCI compiler thread creation failed:");
            PENDING_EXCEPTION->print_on(&msg);
            print_compiler_threads(msg);
          }
          CLEAR_PENDING_EXCEPTION;
          break;
        }
        // Check if another thread has beaten us during the Java calls.
        if (get_c2_thread_count() != i) break;
        jobject thread_handle = JNIHandles::make_global(thread_oop);
        assert(compiler2_object(i) == nullptr, "Old one must be released!");
        _compiler2_objects[i] = thread_handle;
      }
#endif
      guarantee(compiler2_object(i) != nullptr, "Thread oop must exist");
      JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD);
      if (ct == nullptr) break;
      _compilers[1]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh; // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_np/M));
        print_compiler_threads(msg);
      }
    }
  }

  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN4(_c1_count,
                        _c1_compile_queue->size() / c1_tasks_per_thread,
                        (int)(free_memory / (100*M)),
                        (int)(available_cc_p / (128*K)));

    for (int i = old_c1_count; i < new_c1_count; i++) {
      JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD);
      if (ct == nullptr) break;
      _compilers[0]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh; // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_p/M));
        print_compiler_threads(msg);
      }
    }
  }

  CompileThread_lock->unlock();
}


/**
 * Set the methods on the stack as on_stack so that redefine classes doesn't
 * reclaim them. This method is executed at a safepoint.
 */
void CompileBroker::mark_on_stack() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
  // Since we are at a safepoint, we do not need a lock to access
  // the compile queues.
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->mark_on_stack();
  }
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->mark_on_stack();
  }
  if (_ac1_compile_queue != nullptr) {
    _ac1_compile_queue->mark_on_stack();
  }
  if (_ac2_compile_queue != nullptr) {
    _ac2_compile_queue->mark_on_stack();
  }
}

// ------------------------------------------------------------------
// CompileBroker::compile_method
//
// Request compilation of a method.
void CompileBroker::compile_method_base(const methodHandle& method,
                                        int osr_bci,
                                        int comp_level,
                                        int hot_count,
                                        CompileTask::CompileReason compile_reason,
                                        bool requires_online_compilation,
                                        bool blocking,
                                        Thread* thread) {
  guarantee(!method->is_abstract(), "cannot compile abstract methods");
  assert(method->method_holder()->is_instance_klass(),
         "sanity check");
  assert(!method->method_holder()->is_not_initialized() ||
         compile_reason == CompileTask::Reason_Preload ||
         compile_reason == CompileTask::Reason_Precompile ||
         compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
  assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");

  if (CIPrintRequests) {
    tty->print("request: ");
    method->print_short_name(tty);
    if (osr_bci != InvocationEntryBci) {
      tty->print(" osr_bci: %d", osr_bci);
    }
    tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count);
    if (hot_count > 0) {
      tty->print(" hot: yes");
    }
    tty->cr();
  }

  // A request has been made for compilation. Before we do any
  // real work, check to see if the method has been compiled
  // in the meantime with a definitive result.
  if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
    return;
  }

#ifndef PRODUCT
  if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) {
    if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) {
      // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI.
      return;
    }
  }
#endif

  // If this method is already in the compile queue, then
  // we do not block the current thread.
  if (compilation_is_in_queue(method)) {
    // We may want to decay our counter a bit here to prevent
    // multiple denied requests for compilation. This is an
    // open compilation policy issue. Note: The other possibility,
    // in the case that this is a blocking compile request, is to have
    // all subsequent blocking requesters wait for completion of
    // ongoing compiles. Note that in this case we'll need a protocol
    // for freeing the associated compile tasks. [Or we could have
    // a single static monitor on which all these waiters sleep.]
    return;
  }

  // Tiered policy requires MethodCounters to exist before adding a method to
  // the queue. Create if we don't have them yet.
  if (compile_reason != CompileTask::Reason_Preload) {
    method->get_method_counters(thread);
  }

  AOTCodeEntry* aot_code_entry = find_aot_code_entry(method, osr_bci, comp_level, compile_reason, requires_online_compilation);
  bool is_aot = (aot_code_entry != nullptr);

  // Outputs from the following MutexLocker block:
  CompileTask* task = nullptr;
  CompileQueue* queue = compile_queue(comp_level, is_aot);

  // Acquire our lock.
  {
    ConditionalMutexLocker locker(thread, queue->lock(), !UseLockFreeCompileQueues);

    // Make sure the method has not slipped into the queues since
    // last we checked; note that those checks were "fast bail-outs".
    // Here we need to be more careful, see 14012000 below.
    if (compilation_is_in_queue(method)) {
      return;
    }

    // We need to check again to see if the compilation has
    // completed. A previous compilation may have registered
    // some result.
    if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
      return;
    }

    // We now know that this compilation is not pending, complete,
    // or prohibited. Assign a compile_id to this compilation
    // and check to see if it is in our [Start..Stop) range.
    int compile_id = assign_compile_id(method, osr_bci);
    if (compile_id == 0) {
      // The compilation falls outside the allowed range.
      return;
    }

#if INCLUDE_JVMCI
    if (UseJVMCICompiler && blocking) {
      // Don't allow blocking compiles for requests triggered by JVMCI.
      if (thread->is_Compiler_thread()) {
        blocking = false;
      }

      // In libjvmci, JVMCI initialization should not deadlock with other threads
      if (!UseJVMCINativeLibrary) {
        // Don't allow blocking compiles if inside a class initializer or while performing class loading
        vframeStream vfst(JavaThread::cast(thread));
        for (; !vfst.at_end(); vfst.next()) {
          if (vfst.method()->is_static_initializer() ||
              (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) &&
               vfst.method()->name() == vmSymbols::loadClass_name())) {
            blocking = false;
            break;
          }
        }

        // Don't allow blocking compilation requests to JVMCI
        // if JVMCI itself is not yet initialized
        if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
          blocking = false;
        }
      }

      // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
      // to avoid deadlock between compiler thread(s) and threads run at shutdown
      // such as the DestroyJavaVM thread.
      if (JVMCI::in_shutdown()) {
        blocking = false;
      }
    }
#endif // INCLUDE_JVMCI

    // We will enter the compilation in the queue.
    // 14012000: Note that this sets the queued_for_compile bits in
    // the target method. We can now reason that a method cannot be
    // queued for compilation more than once, as follows:
    // Before a thread queues a task for compilation, it first acquires
    // the compile queue lock, then checks if the method's queued bits
    // are set or it has already been compiled. Thus there can not be two
    // instances of a compilation task for the same method on the
    // compilation queue. Consider now the case where the compilation
    // thread has already removed a task for that method from the queue
    // and is in the midst of compiling it. In this case, the
    // queued_for_compile bits must be set in the method (and these
    // will be visible to the current thread, since the bits were set
    // under protection of the compile queue lock, which we hold now.
    // When the compilation completes, the compiler thread first sets
    // the compilation result and then clears the queued_for_compile
    // bits. Neither of these actions are protected by a barrier (or done
    // under the protection of a lock), so the only guarantee we have
    // (on machines with TSO (Total Store Order)) is that these values
    // will update in that order. As a result, the only combinations of
    // these bits that the current thread will see are, in temporal order:
    // <RESULT, QUEUE> :
    //     <0, 1> : in compile queue, but not yet compiled
    //     <1, 1> : compiled but queue bit not cleared
    //     <1, 0> : compiled and queue bit cleared
    // Because we first check the queue bits then check the result bits,
    // we are assured that we cannot introduce a duplicate task.
    // Note that if we did the tests in the reverse order (i.e. check
    // result then check queued bit), we could get the result bit before
    // the compilation completed, and the queue bit after the compilation
    // completed, and end up introducing a "duplicate" (redundant) task.
    // In that case, the compiler thread should first check if a method
    // has already been compiled before trying to compile it.
    // NOTE: in the event that there are multiple compiler threads and
    // there is de-optimization/recompilation, things will get hairy,
    // and in that case it's best to protect both the testing (here) of
    // these bits, and their updating (here and elsewhere) under a
    // common lock.
    task = create_compile_task(queue,
                               compile_id, method,
                               osr_bci, comp_level,
                               hot_count, aot_code_entry, compile_reason,
                               requires_online_compilation, blocking);

    if (task->is_aot_load() && (_ac_count > 0)) {
      // Put it on AOT code caching queue
      queue = is_c1_compile(comp_level) ? _ac1_compile_queue : _ac2_compile_queue;
    }

    if (UseLockFreeCompileQueues) {
      assert(queue->lock()->owned_by_self() == false, "");
      queue->add_pending(task);
    } else {
      queue->add(task);
    }
  }

  if (blocking) {
    wait_for_completion(task);
  }
}

AOTCodeEntry* CompileBroker::find_aot_code_entry(const methodHandle& method, int osr_bci, int comp_level,
                                                 CompileTask::CompileReason compile_reason,
                                                 bool requires_online_compilation) {
  if (requires_online_compilation || compile_reason == CompileTask::Reason_Whitebox) {
    return nullptr; // Need normal JIT compilation
  }
  AOTCodeEntry* aot_code_entry = nullptr;
  if (osr_bci == InvocationEntryBci && AOTCodeCache::is_using_code()) {
    // Check for AOT preload code first.
    if (compile_reason == CompileTask::Reason_Preload) {
      aot_code_entry = method->aot_code_entry();
      assert(aot_code_entry != nullptr && aot_code_entry->for_preload(), "sanity");
    } else {
      aot_code_entry = AOTCodeCache::find_code_entry(method, comp_level);
    }
  }
  return aot_code_entry;
}

nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                       int comp_level,
                                       int hot_count,
                                       bool requires_online_compilation,
                                       CompileTask::CompileReason compile_reason,
                                       TRAPS) {
  // Do nothing if compilebroker is not initialized or compiles are submitted on level none
  if (!_initialized || comp_level == CompLevel_none) {
    return nullptr;
  }

#if INCLUDE_JVMCI
  if (EnableJVMCI && UseJVMCICompiler &&
      comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
    return nullptr;
  }
#endif

  AbstractCompiler *comp = CompileBroker::compiler(comp_level);
  assert(comp != nullptr, "Ensure we have a compiler");

#if INCLUDE_JVMCI
  if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) {
    // JVMCI compilation is not yet initializable.
1564 return nullptr; 1565 } 1566 #endif 1567 1568 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp); 1569 // CompileBroker::compile_method can trap and can have pending async exception. 1570 nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_count, requires_online_compilation, compile_reason, directive, THREAD); 1571 DirectivesStack::release(directive); 1572 return nm; 1573 } 1574 1575 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1576 int comp_level, 1577 int hot_count, 1578 bool requires_online_compilation, 1579 CompileTask::CompileReason compile_reason, 1580 DirectiveSet* directive, 1581 TRAPS) { 1582 1583 // make sure arguments make sense 1584 assert(method->method_holder()->is_instance_klass(), "not an instance method"); 1585 assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); 1586 assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods"); 1587 assert(!method->method_holder()->is_not_initialized() || 1588 compile_reason == CompileTask::Reason_Preload || 1589 compile_reason == CompileTask::Reason_Precompile || 1590 compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized"); 1591 // return quickly if possible 1592 bool aot_compilation = (PrecompileCode && PrecompileOnlyAndExit) || 1593 CDSConfig::is_dumping_aot_code(); 1594 if (aot_compilation && !CompileTask::reason_is_precompile(compile_reason)) { 1595 // Skip normal compilations when compiling AOT code 1596 return nullptr; 1597 } 1598 1599 // lock, make sure that the compilation 1600 // isn't prohibited in a straightforward way. 1601 AbstractCompiler* comp = CompileBroker::compiler(comp_level); 1602 if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) { 1603 return nullptr; 1604 } 1605 1606 if (osr_bci == InvocationEntryBci) { 1607 // standard compilation 1608 nmethod* method_code = method->code(); 1609 if (method_code != nullptr) { 1610 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) { 1611 return method_code; 1612 } 1613 } 1614 if (method->is_not_compilable(comp_level)) { 1615 return nullptr; 1616 } 1617 } else { 1618 // osr compilation 1619 // We accept a higher level osr method 1620 nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false); 1621 if (nm != nullptr) return nm; 1622 if (method->is_not_osr_compilable(comp_level)) return nullptr; 1623 } 1624 1625 assert(!HAS_PENDING_EXCEPTION, "No exception should be present"); 1626 // some prerequisites that are compiler specific 1627 if (compile_reason != CompileTask::Reason_Preload && 1628 !CompileTask::reason_is_precompile(compile_reason) && 1629 (comp->is_c2() || comp->is_jvmci())) { 1630 InternalOOMEMark iom(THREAD); 1631 method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL); 1632 // Resolve all classes seen in the signature of the method 1633 // we are compiling. 1634 Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL); 1635 } 1636 1637 // If the method is native, do the lookup in the thread requesting 1638 // the compilation. Native lookups can load code, which is not 1639 // permitted during compilation. 1640 // 1641 // Note: A native method implies non-osr compilation which is 1642 // checked with an assertion at the entry of this method. 
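// (Illustrative aside, assuming standard JNI naming: the lookup below resolves
// the method's native entry point -- e.g. a symbol of the form Java_<class>_<name> --
// and, per the comment above, may load code in the process, which is why it is
// done here on the requesting Java thread.)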
1643 if (method->is_native() && !method->is_method_handle_intrinsic()) { 1644 address adr = NativeLookup::lookup(method, THREAD); 1645 if (HAS_PENDING_EXCEPTION) { 1646 // In case of an exception looking up the method, we just forget 1647 // about it. The interpreter will kick in and throw the exception. 1648 method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable() 1649 CLEAR_PENDING_EXCEPTION; 1650 return nullptr; 1651 } 1652 assert(method->has_native_function(), "must have native code by now"); 1653 } 1654 1655 // RedefineClasses() has replaced this method; just return 1656 if (method->is_old()) { 1657 return nullptr; 1658 } 1659 1660 // JVMTI -- post_compile_event requires jmethod_id() that may require 1661 // a lock the compiling thread cannot acquire. Prefetch it here. 1662 if (JvmtiExport::should_post_compiled_method_load()) { 1663 method->jmethod_id(); 1664 } 1665 1666 // do the compilation 1667 if (method->is_native()) { 1668 if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) { 1669 // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that 1670 // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime). 1671 // 1672 // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter 1673 // in this case. If we can't generate one and use it we cannot execute the out-of-line method handle calls. 1674 AdapterHandlerLibrary::create_native_wrapper(method); 1675 } else { 1676 return nullptr; 1677 } 1678 } else { 1679 // If the compiler is shut off due to the code cache getting full, 1680 // fail out now so blocking compiles don't hang the Java thread 1681 if (!should_compile_new_jobs()) { 1682 return nullptr; 1683 } 1684 bool is_blocking = ReplayCompiles || 1685 !directive->BackgroundCompilationOption || 1686 (PreloadBlocking && (compile_reason == CompileTask::Reason_Preload)); 1687 compile_method_base(method, osr_bci, comp_level, hot_count, compile_reason, requires_online_compilation, is_blocking, THREAD); 1688 } 1689 1690 // return requested nmethod 1691 // We accept a higher level osr method 1692 if (osr_bci == InvocationEntryBci) { 1693 return method->code(); 1694 } 1695 return method->lookup_osr_nmethod_for(osr_bci, comp_level, false); 1696 } 1697 1698 1699 // ------------------------------------------------------------------ 1700 // CompileBroker::compilation_is_complete 1701 // 1702 // See if compilation of this method is already complete. 1703 bool CompileBroker::compilation_is_complete(Method* method, 1704 int osr_bci, 1705 int comp_level, 1706 bool online_only, 1707 CompileTask::CompileReason compile_reason) { 1708 if (compile_reason == CompileTask::Reason_Precompile || 1709 compile_reason == CompileTask::Reason_PrecompileForPreload) { 1710 return false; // FIXME: any restrictions?
1711 } 1712 bool is_osr = (osr_bci != standard_entry_bci); 1713 if (is_osr) { 1714 if (method->is_not_osr_compilable(comp_level)) { 1715 return true; 1716 } else { 1717 nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true); 1718 return (result != nullptr); 1719 } 1720 } else { 1721 if (method->is_not_compilable(comp_level)) { 1722 return true; 1723 } else { 1724 nmethod* result = method->code(); 1725 if (result == nullptr) { 1726 return false; 1727 } 1728 if (online_only && result->is_aot()) { 1729 return false; 1730 } 1731 bool same_level = (comp_level == result->comp_level()); 1732 if (result->has_clinit_barriers()) { 1733 return !same_level; // Allow replace preloaded code with new code of the same level 1734 } 1735 return same_level; 1736 } 1737 } 1738 } 1739 1740 1741 /** 1742 * See if this compilation is already requested. 1743 * 1744 * Implementation note: there is only a single "is in queue" bit 1745 * for each method. This means that the check below is overly 1746 * conservative in the sense that an osr compilation in the queue 1747 * will block a normal compilation from entering the queue (and vice 1748 * versa). This can be remedied by a full queue search to disambiguate 1749 * cases. If it is deemed profitable, this may be done. 1750 */ 1751 bool CompileBroker::compilation_is_in_queue(const methodHandle& method) { 1752 return method->queued_for_compilation(); 1753 } 1754 1755 // ------------------------------------------------------------------ 1756 // CompileBroker::compilation_is_prohibited 1757 // 1758 // See if this compilation is not allowed. 1759 bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) { 1760 bool is_native = method->is_native(); 1761 // Some compilers may not support the compilation of natives. 1762 AbstractCompiler *comp = compiler(comp_level); 1763 if (is_native && (!CICompileNatives || comp == nullptr)) { 1764 method->set_not_compilable_quietly("native methods not supported", comp_level); 1765 return true; 1766 } 1767 1768 bool is_osr = (osr_bci != standard_entry_bci); 1769 // Some compilers may not support on stack replacement. 1770 if (is_osr && (!CICompileOSR || comp == nullptr)) { 1771 method->set_not_osr_compilable("OSR not supported", comp_level); 1772 return true; 1773 } 1774 1775 // The method may be explicitly excluded by the user. 1776 double scale; 1777 if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) { 1778 bool quietly = CompilerOracle::be_quiet(); 1779 if (PrintCompilation && !quietly) { 1780 // This does not happen quietly... 1781 ResourceMark rm; 1782 tty->print("### Excluding %s:%s", 1783 method->is_native() ? "generation of native wrapper" : "compile", 1784 (method->is_static() ? " static" : "")); 1785 method->print_short_name(tty); 1786 tty->cr(); 1787 } 1788 method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly); 1789 } 1790 1791 return false; 1792 } 1793 1794 /** 1795 * Generate serialized IDs for compilation requests. If certain debugging flags are used 1796 * and the ID is not within the specified range, the method is not compiled and 0 is returned. 1797 * The function also allows to generate separate compilation IDs for OSR compilations. 
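 * For example (illustrative, debug builds only): running with -XX:CIStart=100
 * and -XX:CIStop=200 compiles only requests whose assigned id falls in
 * [100, 200); methods outside that range are quietly marked not compilable.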
1798 */ 1799 int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) { 1800 #ifdef ASSERT 1801 bool is_osr = (osr_bci != standard_entry_bci); 1802 int id; 1803 if (method->is_native()) { 1804 assert(!is_osr, "can't be osr"); 1805 // Adapters, native wrappers and method handle intrinsics 1806 // should always be generated. 1807 return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1); 1808 } else if (CICountOSR && is_osr) { 1809 id = Atomic::add(&_osr_compilation_id, 1); 1810 if (CIStartOSR <= id && id < CIStopOSR) { 1811 return id; 1812 } 1813 } else { 1814 id = Atomic::add(&_compilation_id, 1); 1815 if (CIStart <= id && id < CIStop) { 1816 return id; 1817 } 1818 } 1819 1820 // Method was not in the appropriate compilation range. 1821 method->set_not_compilable_quietly("Not in requested compile id range"); 1822 return 0; 1823 #else 1824 // CICountOSR is a develop flag and set to 'false' by default. In a product build, 1825 // only _compilation_id is incremented. 1826 return Atomic::add(&_compilation_id, 1); 1827 #endif 1828 } 1829 1830 // ------------------------------------------------------------------ 1831 // CompileBroker::assign_compile_id_unlocked 1832 // 1833 // Public wrapper for assign_compile_id that acquires the needed locks 1834 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) { 1835 return assign_compile_id(method, osr_bci); 1836 } 1837 1838 // ------------------------------------------------------------------ 1839 // CompileBroker::create_compile_task 1840 // 1841 // Create a CompileTask object representing the current request for 1842 // compilation. The caller is responsible for adding it to a compile queue. 1843 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue, 1844 int compile_id, 1845 const methodHandle& method, 1846 int osr_bci, 1847 int comp_level, 1848 int hot_count, 1849 AOTCodeEntry* aot_code_entry, 1850 CompileTask::CompileReason compile_reason, 1851 bool requires_online_compilation, 1852 bool blocking) { 1853 CompileTask* new_task = new CompileTask(compile_id, method, osr_bci, comp_level, 1854 hot_count, aot_code_entry, compile_reason, queue, 1855 requires_online_compilation, blocking); 1856 return new_task; 1857 } 1858 1859 #if INCLUDE_JVMCI 1860 // The number of milliseconds to wait before checking if 1861 // JVMCI compilation has made progress. 1862 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000; 1863 1864 // The number of JVMCI compilation progress checks that must fail 1865 // before unblocking a thread waiting for a blocking compilation. 1866 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10; 1867 1868 /** 1869 * Waits for a JVMCI compiler to complete a given task. This thread 1870 * waits until either the task completes or it sees no JVMCI compilation 1871 * progress for N consecutive milliseconds where N is 1872 * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * 1873 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS.
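 * (With the constants above this amounts to 1000 ms * 10 = 10 seconds without
 * observed progress before the waiting thread is unblocked.)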
1874 * 1875 * @return true if this thread needs to delete the task 1876 */ 1877 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) { 1878 assert(UseJVMCICompiler, "sanity"); 1879 MonitorLocker ml(thread, CompileTaskWait_lock); 1880 int progress_wait_attempts = 0; 1881 jint thread_jvmci_compilation_ticks = 0; 1882 jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks(); 1883 while (!task->is_complete() && !is_compilation_disabled_forever() && 1884 ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) { 1885 JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state(); 1886 1887 bool progress; 1888 if (jvmci_compile_state != nullptr) { 1889 jint ticks = jvmci_compile_state->compilation_ticks(); 1890 progress = (ticks - thread_jvmci_compilation_ticks) != 0; 1891 JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks); 1892 thread_jvmci_compilation_ticks = ticks; 1893 } else { 1894 // Still waiting on JVMCI compiler queue. This thread may be holding a lock 1895 // that all JVMCI compiler threads are blocked on. We use the global JVMCI 1896 // compilation ticks to determine whether JVMCI compilation 1897 // is still making progress through the JVMCI compiler queue. 1898 jint ticks = jvmci->global_compilation_ticks(); 1899 progress = (ticks - global_jvmci_compilation_ticks) != 0; 1900 JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks); 1901 global_jvmci_compilation_ticks = ticks; 1902 } 1903 1904 if (!progress) { 1905 if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) { 1906 if (PrintCompilation) { 1907 task->print(tty, "wait for blocking compilation timed out"); 1908 } 1909 JVMCI_event_1("waiting on compilation %d timed out", task->compile_id()); 1910 break; 1911 } 1912 } else { 1913 progress_wait_attempts = 0; 1914 } 1915 } 1916 task->clear_waiter(); 1917 return task->is_complete(); 1918 } 1919 #endif 1920 1921 /** 1922 * Wait for the compilation task to complete. 1923 */ 1924 void CompileBroker::wait_for_completion(CompileTask* task) { 1925 if (CIPrintCompileQueue) { 1926 ttyLocker ttyl; 1927 tty->print_cr("BLOCKING FOR COMPILE"); 1928 } 1929 1930 assert(task->is_blocking(), "can only wait on blocking task"); 1931 1932 JavaThread* thread = JavaThread::current(); 1933 1934 methodHandle method(thread, task->method()); 1935 bool free_task; 1936 #if INCLUDE_JVMCI 1937 AbstractCompiler* comp = compiler(task->comp_level()); 1938 if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) { 1939 // It may return before compilation is completed. 1940 // Note that libjvmci should not pre-emptively unblock 1941 // a thread waiting for a compilation as it does not call 1942 // Java code and so is not deadlock prone like jarjvmci. 1943 free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread); 1944 } else 1945 #endif 1946 { 1947 free_task = true; 1948 // Wait until the task is complete or compilation is shut down. 1949 MonitorLocker ml(thread, CompileTaskWait_lock); 1950 while (!task->is_complete() && !is_compilation_disabled_forever()) { 1951 ml.wait(); 1952 } 1953 } 1954 1955 // It is harmless to check this status without the lock, because 1956 // completion is a stable property. 1957 if (!task->is_complete() && is_compilation_disabled_forever()) { 1958 // Task is not complete, and we are exiting for compilation shutdown. 
1959 // The task can still be executed by some compiler thread, therefore 1960 // we cannot delete it. This will leave task allocated, which leaks it. 1961 // At this (degraded) point, it is less risky to abandon the task, 1962 // rather than attempting a more complicated deletion protocol. 1963 free_task = false; 1964 } 1965 1966 if (free_task) { 1967 assert(task->is_complete(), "Compilation should have completed"); 1968 1969 // By convention, the waiter is responsible for deleting a 1970 // blocking CompileTask. Since there is only one waiter ever 1971 // waiting on a CompileTask, we know that no one else will 1972 // be using this CompileTask; we can delete it. 1973 delete task; 1974 } 1975 } 1976 1977 void CompileBroker::wait_for_no_active_tasks() { 1978 CompileTask::wait_for_no_active_tasks(); 1979 } 1980 1981 /** 1982 * Initialize compiler thread(s) + compiler object(s). The postcondition 1983 * of this function is that the compiler runtimes are initialized and that 1984 * compiler threads can start compiling. 1985 */ 1986 bool CompileBroker::init_compiler_runtime() { 1987 CompilerThread* thread = CompilerThread::current(); 1988 AbstractCompiler* comp = thread->compiler(); 1989 // Final sanity check - the compiler object must exist 1990 guarantee(comp != nullptr, "Compiler object must exist"); 1991 1992 { 1993 // Must switch to native to allocate ci_env 1994 ThreadToNativeFromVM ttn(thread); 1995 ciEnv ci_env((CompileTask*)nullptr); 1996 // Cache Jvmti state 1997 ci_env.cache_jvmti_state(); 1998 // Cache DTrace flags 1999 ci_env.cache_dtrace_flags(); 2000 2001 // Switch back to VM state to do compiler initialization 2002 ThreadInVMfromNative tv(thread); 2003 2004 comp->initialize(); 2005 } 2006 2007 if (comp->is_failed()) { 2008 disable_compilation_forever(); 2009 // If compiler initialization failed, no compiler thread that is specific to a 2010 // particular compiler runtime will ever start to compile methods. 2011 shutdown_compiler_runtime(comp, thread); 2012 return false; 2013 } 2014 2015 // C1 specific check 2016 if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) { 2017 warning("Initialization of %s thread failed (no space to run compilers)", thread->name()); 2018 return false; 2019 } 2020 2021 return true; 2022 } 2023 2024 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { 2025 BufferBlob* blob = thread->get_buffer_blob(); 2026 if (blob != nullptr) { 2027 blob->purge(); 2028 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2029 CodeCache::free(blob); 2030 } 2031 } 2032 2033 /** 2034 * If C1 and/or C2 initialization failed, we shut down all compilation. 2035 * We do this to keep things simple. This can be changed if it ever turns 2036 * out to be a problem. 2037 */ 2038 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { 2039 free_buffer_blob_if_allocated(thread); 2040 2041 log_info(compilation)("shutdown_compiler_runtime: " INTPTR_FORMAT, p2i(thread)); 2042 2043 if (comp->should_perform_shutdown()) { 2044 // There are two reasons for shutting down the compiler 2045 // 1) compiler runtime initialization failed 2046 // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing 2047 warning("%s initialization failed. 
Shutting down all compilers", comp->name()); 2048 2049 // Only one thread per compiler runtime object enters here 2050 // Set state to shut down 2051 comp->set_shut_down(); 2052 2053 // Delete all queued compilation tasks to make compiler threads exit faster. 2054 if (_c1_compile_queue != nullptr) { 2055 _c1_compile_queue->delete_all(); 2056 } 2057 2058 if (_c2_compile_queue != nullptr) { 2059 _c2_compile_queue->delete_all(); 2060 } 2061 2062 // Set flags so that we continue execution with using interpreter only. 2063 UseCompiler = false; 2064 UseInterpreter = true; 2065 2066 // We could delete compiler runtimes also. However, there are references to 2067 // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then 2068 // fail. This can be done later if necessary. 2069 } 2070 } 2071 2072 /** 2073 * Helper function to create new or reuse old CompileLog. 2074 */ 2075 CompileLog* CompileBroker::get_log(CompilerThread* ct) { 2076 if (!LogCompilation) return nullptr; 2077 2078 AbstractCompiler *compiler = ct->compiler(); 2079 bool c1 = compiler->is_c1(); 2080 jobject* compiler_objects = c1 ? _compiler1_objects : _compiler2_objects; 2081 assert(compiler_objects != nullptr, "must be initialized at this point"); 2082 CompileLog** logs = c1 ? _compiler1_logs : _compiler2_logs; 2083 assert(logs != nullptr, "must be initialized at this point"); 2084 int count = c1 ? _c1_count : _c2_count; 2085 2086 if (ct->queue() == _ac1_compile_queue || ct->queue() == _ac2_compile_queue) { 2087 compiler_objects = _ac_objects; 2088 logs = _ac_logs; 2089 count = _ac_count; 2090 } 2091 // Find Compiler number by its threadObj. 2092 oop compiler_obj = ct->threadObj(); 2093 int compiler_number = 0; 2094 bool found = false; 2095 for (; compiler_number < count; compiler_number++) { 2096 if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) { 2097 found = true; 2098 break; 2099 } 2100 } 2101 assert(found, "Compiler must exist at this point"); 2102 2103 // Determine pointer for this thread's log. 2104 CompileLog** log_ptr = &logs[compiler_number]; 2105 2106 // Return old one if it exists. 2107 CompileLog* log = *log_ptr; 2108 if (log != nullptr) { 2109 ct->init_log(log); 2110 return log; 2111 } 2112 2113 // Create a new one and remember it. 2114 init_compiler_thread_log(); 2115 log = ct->log(); 2116 *log_ptr = log; 2117 return log; 2118 } 2119 2120 // ------------------------------------------------------------------ 2121 // CompileBroker::compiler_thread_loop 2122 // 2123 // The main loop run by a CompilerThread. 2124 void CompileBroker::compiler_thread_loop() { 2125 CompilerThread* thread = CompilerThread::current(); 2126 CompileQueue* queue = thread->queue(); 2127 // For the thread that initializes the ciObjectFactory 2128 // this resource mark holds all the shared objects 2129 ResourceMark rm; 2130 2131 // First thread to get here will initialize the compiler interface 2132 2133 { 2134 ASSERT_IN_VM; 2135 MutexLocker only_one (thread, CompileThread_lock); 2136 if (!ciObjectFactory::is_initialized()) { 2137 ciObjectFactory::initialize(); 2138 } 2139 } 2140 2141 // Open a log. 
2142 CompileLog* log = get_log(thread); 2143 if (log != nullptr) { 2144 log->begin_elem("start_compile_thread name='%s' thread='%zu' process='%d'", 2145 thread->name(), 2146 os::current_thread_id(), 2147 os::current_process_id()); 2148 log->stamp(); 2149 log->end_elem(); 2150 } 2151 2152 // If compiler thread/runtime initialization fails, exit the compiler thread 2153 if (!init_compiler_runtime()) { 2154 return; 2155 } 2156 2157 thread->start_idle_timer(); 2158 2159 // Poll for new compilation tasks as long as the JVM runs. Compilation 2160 // should only be disabled if something went wrong while initializing the 2161 // compiler runtimes. This, in turn, should not happen. The only known case 2162 // when compiler runtime initialization fails is if there is not enough free 2163 // space in the code cache to generate the necessary stubs, etc. 2164 while (!is_compilation_disabled_forever()) { 2165 // We need this HandleMark to avoid leaking VM handles. 2166 HandleMark hm(thread); 2167 2168 RecompilationPolicy::recompilation_step(AOTRecompilationWorkUnitSize, thread); 2169 2170 CompileTask* task = queue->get(thread); 2171 if (task == nullptr) { 2172 if (UseDynamicNumberOfCompilerThreads) { 2173 // Access compiler_count under lock to enforce consistency. 2174 MutexLocker only_one(CompileThread_lock); 2175 if (can_remove(thread, true)) { 2176 if (trace_compiler_threads()) { 2177 ResourceMark rm; 2178 stringStream msg; 2179 msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time", 2180 thread->name(), thread->idle_time_millis()); 2181 print_compiler_threads(msg); 2182 } 2183 2184 // Notify compiler that the compiler thread is about to stop 2185 thread->compiler()->stopping_compiler_thread(thread); 2186 2187 free_buffer_blob_if_allocated(thread); 2188 return; // Stop this thread. 2189 } 2190 } 2191 } else { 2192 // Assign the task to the current thread. Mark this compilation 2193 // thread as active for the profiler. 2194 // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition 2195 // occurs after fetching the compile task off the queue. 2196 CompileTaskWrapper ctw(task); 2197 methodHandle method(thread, task->method()); 2198 2199 // Never compile a method if breakpoints are present in it 2200 if (method()->number_of_breakpoints() == 0) { 2201 // Compile the method. 2202 if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { 2203 invoke_compiler_on_method(task); 2204 thread->start_idle_timer(); 2205 } else { 2206 // After compilation is disabled, remove remaining methods from queue 2207 method->clear_queued_for_compilation(); 2208 method->set_pending_queue_processed(false); 2209 task->set_failure_reason("compilation is disabled"); 2210 } 2211 } else { 2212 task->set_failure_reason("breakpoints are present"); 2213 } 2214 2215 // Don't use AOT compiler threads for dynamic creation of C1 and C2 threads. 2216 if (UseDynamicNumberOfCompilerThreads && 2217 (queue == _c1_compile_queue || queue == _c2_compile_queue)) { 2218 possibly_add_compiler_threads(thread); 2219 assert(!thread->has_pending_exception(), "should have been handled"); 2220 } 2221 } 2222 } 2223 2224 // Shut down compiler runtime 2225 shutdown_compiler_runtime(thread->compiler(), thread); 2226 } 2227 2228 // ------------------------------------------------------------------ 2229 // CompileBroker::init_compiler_thread_log 2230 // 2231 // Set up state required by +LogCompilation.
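// A sketch of the resulting file name, per the jio_snprintf calls below:
// hs_c<thread_id>_pid<process_id>.log, created in os::get_temp_directory() when
// possible, otherwise in the current working directory.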
2232 void CompileBroker::init_compiler_thread_log() { 2233 CompilerThread* thread = CompilerThread::current(); 2234 char file_name[4*K]; 2235 FILE* fp = nullptr; 2236 intx thread_id = os::current_thread_id(); 2237 for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) { 2238 const char* dir = (try_temp_dir ? os::get_temp_directory() : nullptr); 2239 if (dir == nullptr) { 2240 jio_snprintf(file_name, sizeof(file_name), "hs_c%zu_pid%u.log", 2241 thread_id, os::current_process_id()); 2242 } else { 2243 jio_snprintf(file_name, sizeof(file_name), 2244 "%s%shs_c%zu_pid%u.log", dir, 2245 os::file_separator(), thread_id, os::current_process_id()); 2246 } 2247 2248 fp = os::fopen(file_name, "wt"); 2249 if (fp != nullptr) { 2250 if (LogCompilation && Verbose) { 2251 tty->print_cr("Opening compilation log %s", file_name); 2252 } 2253 CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id); 2254 if (log == nullptr) { 2255 fclose(fp); 2256 return; 2257 } 2258 thread->init_log(log); 2259 2260 if (xtty != nullptr) { 2261 ttyLocker ttyl; 2262 // Record any per thread log files 2263 xtty->elem("thread_logfile thread='%zd' filename='%s'", thread_id, file_name); 2264 } 2265 return; 2266 } 2267 } 2268 warning("Cannot open log file: %s", file_name); 2269 } 2270 2271 void CompileBroker::log_metaspace_failure() { 2272 const char* message = "some methods may not be compiled because metaspace " 2273 "is out of memory"; 2274 if (CompilationLog::log() != nullptr) { 2275 CompilationLog::log()->log_metaspace_failure(message); 2276 } 2277 if (PrintCompilation) { 2278 tty->print_cr("COMPILE PROFILING SKIPPED: %s", message); 2279 } 2280 } 2281 2282 2283 // ------------------------------------------------------------------ 2284 // CompileBroker::set_should_block 2285 // 2286 // Set _should_block. 2287 // Call this from the VM, with Threads_lock held and a safepoint requested. 2288 void CompileBroker::set_should_block() { 2289 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); 2290 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already"); 2291 #ifndef PRODUCT 2292 if (PrintCompilation && (Verbose || WizardMode)) 2293 tty->print_cr("notifying compiler thread pool to block"); 2294 #endif 2295 _should_block = true; 2296 } 2297 2298 // ------------------------------------------------------------------ 2299 // CompileBroker::maybe_block 2300 // 2301 // Call this from the compiler at convenient points, to poll for _should_block. 2302 void CompileBroker::maybe_block() { 2303 if (_should_block) { 2304 #ifndef PRODUCT 2305 if (PrintCompilation && (Verbose || WizardMode)) 2306 tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current())); 2307 #endif 2308 // If we are executing a task during the request to block, report the task 2309 // before disappearing. 2310 CompilerThread* thread = CompilerThread::current(); 2311 if (thread != nullptr) { 2312 CompileTask* task = thread->task(); 2313 if (task != nullptr) { 2314 if (PrintCompilation) { 2315 task->print(tty, "blocked"); 2316 } 2317 task->print_ul("blocked"); 2318 } 2319 } 2320 // Go to VM state and block for final VM shutdown safepoint. 
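// (The transition below is expected never to return: the thread remains blocked
// at the final shutdown safepoint, as the assert that follows documents.)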
2321 ThreadInVMfromNative tivfn(JavaThread::current()); 2322 assert(false, "Should never unblock from TIVNM entry"); 2323 } 2324 } 2325 2326 // wrapper for CodeCache::print_summary() 2327 static void codecache_print(bool detailed) 2328 { 2329 stringStream s; 2330 // Dump code cache into a buffer before locking the tty, 2331 { 2332 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2333 CodeCache::print_summary(&s, detailed); 2334 } 2335 ttyLocker ttyl; 2336 tty->print("%s", s.freeze()); 2337 } 2338 2339 // wrapper for CodeCache::print_summary() using outputStream 2340 static void codecache_print(outputStream* out, bool detailed) { 2341 stringStream s; 2342 2343 // Dump code cache into a buffer 2344 { 2345 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2346 CodeCache::print_summary(&s, detailed); 2347 } 2348 2349 char* remaining_log = s.as_string(); 2350 while (*remaining_log != '\0') { 2351 char* eol = strchr(remaining_log, '\n'); 2352 if (eol == nullptr) { 2353 out->print_cr("%s", remaining_log); 2354 remaining_log = remaining_log + strlen(remaining_log); 2355 } else { 2356 *eol = '\0'; 2357 out->print_cr("%s", remaining_log); 2358 remaining_log = eol + 1; 2359 } 2360 } 2361 } 2362 2363 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, 2364 int compilable, const char* failure_reason) { 2365 if (!AbortVMOnCompilationFailure) { 2366 return; 2367 } 2368 if (compilable == ciEnv::MethodCompilable_not_at_tier) { 2369 fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); 2370 } 2371 if (compilable == ciEnv::MethodCompilable_never) { 2372 fatal("Never compilable: %s", failure_reason); 2373 } 2374 } 2375 2376 static void post_compilation_event(EventCompilation& event, CompileTask* task) { 2377 assert(task != nullptr, "invariant"); 2378 CompilerEvent::CompilationEvent::post(event, 2379 task->compile_id(), 2380 task->compiler()->type(), 2381 task->method(), 2382 task->comp_level(), 2383 task->is_success(), 2384 task->osr_bci() != CompileBroker::standard_entry_bci, 2385 task->nm_total_size(), 2386 task->num_inlined_bytecodes(), 2387 task->arena_bytes()); 2388 } 2389 2390 int DirectivesStack::_depth = 0; 2391 CompilerDirectives* DirectivesStack::_top = nullptr; 2392 CompilerDirectives* DirectivesStack::_bottom = nullptr; 2393 2394 // Acquires Compilation_lock and waits for it to be notified 2395 // as long as WhiteBox::compilation_locked is true. 2396 static void whitebox_lock_compilation() { 2397 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag); 2398 while (WhiteBox::compilation_locked) { 2399 locker.wait(); 2400 } 2401 } 2402 2403 // ------------------------------------------------------------------ 2404 // CompileBroker::invoke_compiler_on_method 2405 // 2406 // Compile a method. 2407 // 2408 void CompileBroker::invoke_compiler_on_method(CompileTask* task) { 2409 task->print_ul(); 2410 elapsedTimer time; 2411 2412 DirectiveSet* directive = task->directive(); 2413 2414 CompilerThread* thread = CompilerThread::current(); 2415 ResourceMark rm(thread); 2416 2417 if (CompilationLog::log() != nullptr) { 2418 CompilationLog::log()->log_compile(thread, task); 2419 } 2420 2421 // Common flags. 
2422 int compile_id = task->compile_id(); 2423 int osr_bci = task->osr_bci(); 2424 bool is_osr = (osr_bci != standard_entry_bci); 2425 bool should_log = (thread->log() != nullptr); 2426 bool should_break = false; 2427 bool should_print_compilation = PrintCompilation || directive->PrintCompilationOption; 2428 const int task_level = task->comp_level(); 2429 AbstractCompiler* comp = task->compiler(); 2430 { 2431 // create the handle inside it's own block so it can't 2432 // accidentally be referenced once the thread transitions to 2433 // native. The NoHandleMark before the transition should catch 2434 // any cases where this occurs in the future. 2435 methodHandle method(thread, task->method()); 2436 2437 assert(!method->is_native(), "no longer compile natives"); 2438 2439 // Update compile information when using perfdata. 2440 if (UsePerfData) { 2441 update_compile_perf_data(thread, method, is_osr); 2442 } 2443 2444 DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); 2445 } 2446 2447 should_break = directive->BreakAtCompileOption || task->check_break_at_flags(); 2448 if (should_log && !directive->LogOption) { 2449 should_log = false; 2450 } 2451 2452 // Allocate a new set of JNI handles. 2453 JNIHandleMark jhm(thread); 2454 Method* target_handle = task->method(); 2455 int compilable = ciEnv::MethodCompilable; 2456 const char* failure_reason = nullptr; 2457 bool failure_reason_on_C_heap = false; 2458 const char* retry_message = nullptr; 2459 2460 #if INCLUDE_JVMCI 2461 if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) { 2462 JVMCICompiler* jvmci = (JVMCICompiler*) comp; 2463 2464 TraceTime t1("compilation", &time); 2465 EventCompilation event; 2466 JVMCICompileState compile_state(task, jvmci); 2467 JVMCIRuntime *runtime = nullptr; 2468 2469 if (JVMCI::in_shutdown()) { 2470 failure_reason = "in JVMCI shutdown"; 2471 retry_message = "not retryable"; 2472 compilable = ciEnv::MethodCompilable_never; 2473 } else if (compile_state.target_method_is_old()) { 2474 // Skip redefined methods 2475 failure_reason = "redefined method"; 2476 retry_message = "not retryable"; 2477 compilable = ciEnv::MethodCompilable_never; 2478 } else { 2479 JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__); 2480 if (env.init_error() != JNI_OK) { 2481 const char* msg = env.init_error_msg(); 2482 failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)", 2483 env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI); 2484 bool reason_on_C_heap = true; 2485 // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it 2486 // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime. 
2487 bool retryable = env.init_error() == JNI_ENOMEM; 2488 compile_state.set_failure(retryable, failure_reason, reason_on_C_heap); 2489 } 2490 if (failure_reason == nullptr) { 2491 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2492 // Must switch to native to block 2493 ThreadToNativeFromVM ttn(thread); 2494 whitebox_lock_compilation(); 2495 } 2496 methodHandle method(thread, target_handle); 2497 runtime = env.runtime(); 2498 runtime->compile_method(&env, jvmci, method, osr_bci); 2499 2500 failure_reason = compile_state.failure_reason(); 2501 failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); 2502 if (!compile_state.retryable()) { 2503 retry_message = "not retryable"; 2504 compilable = ciEnv::MethodCompilable_not_at_tier; 2505 } 2506 if (!task->is_success()) { 2507 assert(failure_reason != nullptr, "must specify failure_reason"); 2508 } 2509 } 2510 } 2511 if (!task->is_success() && !JVMCI::in_shutdown()) { 2512 handle_compile_error(thread, task, nullptr, compilable, failure_reason); 2513 } 2514 if (event.should_commit()) { 2515 post_compilation_event(event, task); 2516 } 2517 2518 if (runtime != nullptr) { 2519 runtime->post_compile(thread); 2520 } 2521 } else 2522 #endif // INCLUDE_JVMCI 2523 { 2524 NoHandleMark nhm; 2525 ThreadToNativeFromVM ttn(thread); 2526 2527 ciEnv ci_env(task); 2528 if (should_break) { 2529 ci_env.set_break_at_compile(true); 2530 } 2531 if (should_log) { 2532 ci_env.set_log(thread->log()); 2533 } 2534 assert(thread->env() == &ci_env, "set by ci_env"); 2535 // The thread-env() field is cleared in ~CompileTaskWrapper. 2536 2537 // Cache Jvmti state 2538 bool method_is_old = ci_env.cache_jvmti_state(); 2539 2540 // Skip redefined methods 2541 if (method_is_old) { 2542 ci_env.record_method_not_compilable("redefined method", true); 2543 } 2544 2545 // Cache DTrace flags 2546 ci_env.cache_dtrace_flags(); 2547 2548 ciMethod* target = ci_env.get_method_from_handle(target_handle); 2549 2550 TraceTime t1("compilation", &time); 2551 EventCompilation event; 2552 2553 if (comp == nullptr) { 2554 ci_env.record_method_not_compilable("no compiler"); 2555 } else if (!ci_env.failing()) { 2556 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2557 whitebox_lock_compilation(); 2558 } 2559 comp->compile_method(&ci_env, target, osr_bci, true, directive); 2560 2561 /* Repeat compilation without installing code for profiling purposes */ 2562 int repeat_compilation_count = directive->RepeatCompilationOption; 2563 while (repeat_compilation_count > 0) { 2564 ResourceMark rm(thread); 2565 task->print_ul("NO CODE INSTALLED"); 2566 comp->compile_method(&ci_env, target, osr_bci, false, directive); 2567 repeat_compilation_count--; 2568 } 2569 } 2570 2571 2572 if (!ci_env.failing() && !task->is_success() && !task->is_precompile()) { 2573 assert(ci_env.failure_reason() != nullptr, "expect failure reason"); 2574 assert(false, "compiler should always document failure: %s", ci_env.failure_reason()); 2575 // The compiler elected, without comment, not to register a result. 2576 // Do not attempt further compilations of this method. 
2577 ci_env.record_method_not_compilable("compile failed"); 2578 } 2579 2580 // Copy this bit to the enclosing block: 2581 compilable = ci_env.compilable(); 2582 2583 if (ci_env.failing()) { 2584 // Duplicate the failure reason string, so that it outlives ciEnv 2585 failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler); 2586 failure_reason_on_C_heap = true; 2587 retry_message = ci_env.retry_message(); 2588 ci_env.report_failure(failure_reason); 2589 } 2590 2591 if (ci_env.failing()) { 2592 handle_compile_error(thread, task, &ci_env, compilable, failure_reason); 2593 } 2594 if (event.should_commit()) { 2595 post_compilation_event(event, task); 2596 } 2597 } 2598 2599 if (failure_reason != nullptr) { 2600 task->set_failure_reason(failure_reason, failure_reason_on_C_heap); 2601 if (CompilationLog::log() != nullptr) { 2602 CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message); 2603 } 2604 if (PrintCompilation || directive->PrintCompilationOption) { 2605 FormatBufferResource msg = retry_message != nullptr ? 2606 FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) : 2607 FormatBufferResource("COMPILE SKIPPED: %s", failure_reason); 2608 task->print(tty, msg); 2609 } 2610 } 2611 2612 task->mark_finished(os::elapsed_counter()); 2613 DirectivesStack::release(directive); 2614 2615 methodHandle method(thread, task->method()); 2616 2617 DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); 2618 2619 collect_statistics(thread, time, task); 2620 2621 if (PrintCompilation && PrintCompilation2) { 2622 tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp 2623 tty->print("%4d ", compile_id); // print compilation number 2624 tty->print("%s ", (is_osr ? "%" : (task->is_aot_load() ? (task->preload() ? "P" : "A") : " "))); 2625 if (task->is_success()) { 2626 tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size()); 2627 } 2628 tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); 2629 } 2630 2631 Log(compilation, codecache) log; 2632 if (log.is_debug()) { 2633 LogStream ls(log.debug()); 2634 codecache_print(&ls, /* detailed= */ false); 2635 } 2636 if (PrintCodeCacheOnCompilation) { 2637 codecache_print(/* detailed= */ false); 2638 } 2639 // Disable compilation, if required. 2640 switch (compilable) { 2641 case ciEnv::MethodCompilable_never: 2642 if (is_osr) 2643 method->set_not_osr_compilable_quietly("MethodCompilable_never"); 2644 else 2645 method->set_not_compilable_quietly("MethodCompilable_never"); 2646 break; 2647 case ciEnv::MethodCompilable_not_at_tier: 2648 if (is_osr) 2649 method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2650 else 2651 method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2652 break; 2653 } 2654 2655 // Note that the queued_for_compilation bits are cleared without 2656 // protection of a mutex. [They were set by the requester thread, 2657 // when adding the task to the compile queue -- at which time the 2658 // compile queue lock was held. Subsequently, we acquired the compile 2659 // queue lock to get this task off the compile queue; thus (to belabour 2660 // the point somewhat) our clearing of the bits must be occurring 2661 // only after the setting of the bits. See also 14012000 above. 
2662 method->clear_queued_for_compilation(); 2663 method->set_pending_queue_processed(false); 2664 2665 if (should_print_compilation) { 2666 ResourceMark rm; 2667 task->print_tty(); 2668 } 2669 } 2670 2671 /** 2672 * The CodeCache is full. Print warning and disable compilation. 2673 * Schedule code cache cleaning so compilation can continue later. 2674 * This function needs to be called only from CodeCache::allocate(), 2675 * since we currently handle a full code cache uniformly. 2676 */ 2677 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) { 2678 UseInterpreter = true; 2679 if (UseCompiler || AlwaysCompileLoopMethods) { 2680 if (xtty != nullptr) { 2681 stringStream s; 2682 // Dump code cache state into a buffer before locking the tty, 2683 // because log_state() will use locks causing lock conflicts. 2684 CodeCache::log_state(&s); 2685 // Lock to prevent tearing 2686 ttyLocker ttyl; 2687 xtty->begin_elem("code_cache_full"); 2688 xtty->print("%s", s.freeze()); 2689 xtty->stamp(); 2690 xtty->end_elem(); 2691 } 2692 2693 #ifndef PRODUCT 2694 if (ExitOnFullCodeCache) { 2695 codecache_print(/* detailed= */ true); 2696 before_exit(JavaThread::current()); 2697 exit_globals(); // will delete tty 2698 vm_direct_exit(1); 2699 } 2700 #endif 2701 if (UseCodeCacheFlushing) { 2702 // Since code cache is full, immediately stop new compiles 2703 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { 2704 log_info(codecache)("Code cache is full - disabling compilation"); 2705 } 2706 } else { 2707 disable_compilation_forever(); 2708 } 2709 2710 CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning()); 2711 } 2712 } 2713 2714 // ------------------------------------------------------------------ 2715 // CompileBroker::update_compile_perf_data 2716 // 2717 // Record this compilation for debugging purposes. 2718 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) { 2719 ResourceMark rm; 2720 char* method_name = method->name()->as_C_string(); 2721 char current_method[CompilerCounters::cmname_buffer_length]; 2722 size_t maxLen = CompilerCounters::cmname_buffer_length; 2723 2724 const char* class_name = method->method_holder()->name()->as_C_string(); 2725 2726 size_t s1len = strlen(class_name); 2727 size_t s2len = strlen(method_name); 2728 2729 // check if we need to truncate the string 2730 if (s1len + s2len + 2 > maxLen) { 2731 2732 // the strategy is to lop off the leading characters of the 2733 // class name and the trailing characters of the method name. 2734 2735 if (s2len + 2 > maxLen) { 2736 // lop off the entire class name string, let snprintf handle 2737 // truncation of the method name.
2738 class_name += s1len; // null string 2739 } 2740 else { 2741 // lop off the extra characters from the front of the class name 2742 class_name += ((s1len + s2len + 2) - maxLen); 2743 } 2744 } 2745 2746 jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name); 2747 2748 int last_compile_type = normal_compile; 2749 if (CICountOSR && is_osr) { 2750 last_compile_type = osr_compile; 2751 } else if (CICountNative && method->is_native()) { 2752 last_compile_type = native_compile; 2753 } 2754 2755 CompilerCounters* counters = thread->counters(); 2756 counters->set_current_method(current_method); 2757 counters->set_compile_type((jlong) last_compile_type); 2758 } 2759 2760 // ------------------------------------------------------------------ 2761 // CompileBroker::collect_statistics 2762 // 2763 // Collect statistics about the compilation. 2764 2765 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) { 2766 bool success = task->is_success(); 2767 methodHandle method (thread, task->method()); 2768 int compile_id = task->compile_id(); 2769 bool is_osr = (task->osr_bci() != standard_entry_bci); 2770 const int comp_level = task->comp_level(); 2771 CompilerCounters* counters = thread->counters(); 2772 2773 MutexLocker locker(CompileStatistics_lock); 2774 2775 // _perf variables are production performance counters which are 2776 // updated regardless of the setting of the CITime and CITimeEach flags 2777 // 2778 2779 // account all time, including bailouts and failures in this counter; 2780 // C1 and C2 counters are counting both successful and unsuccessful compiles 2781 _t_total_compilation.add(&time); 2782 2783 // Update compilation times. Used by the implementation of JFR CompilerStatistics 2784 // and java.lang.management.CompilationMXBean. 2785 _perf_total_compilation->inc(time.ticks()); 2786 _peak_compilation_time = MAX2(time.milliseconds(), _peak_compilation_time); 2787 2788 if (!success) { 2789 _total_bailout_count++; 2790 if (UsePerfData) { 2791 _perf_last_failed_method->set_value(counters->current_method()); 2792 _perf_last_failed_type->set_value(counters->compile_type()); 2793 _perf_total_bailout_count->inc(); 2794 } 2795 _t_bailedout_compilation.add(&time); 2796 2797 if (CITime || log_is_enabled(Info, init)) { 2798 CompilerStatistics* stats = nullptr; 2799 if (task->is_aot_load()) { 2800 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2801 stats = &_aot_stats_per_level[level]; 2802 } else { 2803 stats = &_stats_per_level[comp_level-1]; 2804 } 2805 stats->_bailout.update(time, 0); 2806 } 2807 } else if (!task->is_success()) { 2808 if (UsePerfData) { 2809 _perf_last_invalidated_method->set_value(counters->current_method()); 2810 _perf_last_invalidated_type->set_value(counters->compile_type()); 2811 _perf_total_invalidated_count->inc(); 2812 } 2813 _total_invalidated_count++; 2814 _t_invalidated_compilation.add(&time); 2815 2816 if (CITime || log_is_enabled(Info, init)) { 2817 CompilerStatistics* stats = nullptr; 2818 if (task->is_aot_load()) { 2819 int level = task->preload() ? 
CompLevel_full_optimization : (comp_level - 1); 2820 stats = &_aot_stats_per_level[level]; 2821 } else { 2822 stats = &_stats_per_level[comp_level-1]; 2823 } 2824 stats->_invalidated.update(time, 0); 2825 } 2826 } else { 2827 // Compilation succeeded 2828 if (CITime || log_is_enabled(Info, init)) { 2829 int bytes_compiled = method->code_size() + task->num_inlined_bytecodes(); 2830 if (is_osr) { 2831 _t_osr_compilation.add(&time); 2832 _sum_osr_bytes_compiled += bytes_compiled; 2833 } else { 2834 _t_standard_compilation.add(&time); 2835 _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); 2836 } 2837 2838 // Collect statistic per compilation level 2839 if (task->is_aot_load()) { 2840 _aot_stats._standard.update(time, bytes_compiled); 2841 _aot_stats._nmethods_size += task->nm_total_size(); 2842 _aot_stats._nmethods_code_size += task->nm_insts_size(); 2843 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2844 CompilerStatistics* stats = &_aot_stats_per_level[level]; 2845 stats->_standard.update(time, bytes_compiled); 2846 stats->_nmethods_size += task->nm_total_size(); 2847 stats->_nmethods_code_size += task->nm_insts_size(); 2848 } else if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) { 2849 CompilerStatistics* stats = &_stats_per_level[comp_level-1]; 2850 if (is_osr) { 2851 stats->_osr.update(time, bytes_compiled); 2852 } else { 2853 stats->_standard.update(time, bytes_compiled); 2854 } 2855 stats->_nmethods_size += task->nm_total_size(); 2856 stats->_nmethods_code_size += task->nm_insts_size(); 2857 } else { 2858 assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); 2859 } 2860 2861 // Collect statistic per compiler 2862 AbstractCompiler* comp = task->compiler(); 2863 if (comp && !task->is_aot_load()) { 2864 CompilerStatistics* stats = comp->stats(); 2865 if (is_osr) { 2866 stats->_osr.update(time, bytes_compiled); 2867 } else { 2868 stats->_standard.update(time, bytes_compiled); 2869 } 2870 stats->_nmethods_size += task->nm_total_size(); 2871 stats->_nmethods_code_size += task->nm_insts_size(); 2872 } else if (!task->is_aot_load()) { // if (!comp) 2873 assert(false, "Compiler object must exist"); 2874 } 2875 } 2876 2877 if (UsePerfData) { 2878 // save the name of the last method compiled 2879 _perf_last_method->set_value(counters->current_method()); 2880 _perf_last_compile_type->set_value(counters->compile_type()); 2881 _perf_last_compile_size->set_value(method->code_size() + 2882 task->num_inlined_bytecodes()); 2883 if (is_osr) { 2884 _perf_osr_compilation->inc(time.ticks()); 2885 _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2886 } else { 2887 _perf_standard_compilation->inc(time.ticks()); 2888 _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2889 } 2890 } 2891 2892 if (CITimeEach) { 2893 double compile_time = time.seconds(); 2894 double bytes_per_sec = compile_time == 0.0 ? 
0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time; 2895 tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)", 2896 compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); 2897 } 2898 2899 // Collect counts of successful compilations 2900 _sum_nmethod_size += task->nm_total_size(); 2901 _sum_nmethod_code_size += task->nm_insts_size(); 2902 _total_compile_count++; 2903 2904 if (UsePerfData) { 2905 _perf_sum_nmethod_size->inc( task->nm_total_size()); 2906 _perf_sum_nmethod_code_size->inc(task->nm_insts_size()); 2907 _perf_total_compile_count->inc(); 2908 } 2909 2910 if (is_osr) { 2911 if (UsePerfData) _perf_total_osr_compile_count->inc(); 2912 _total_osr_compile_count++; 2913 } else { 2914 if (UsePerfData) _perf_total_standard_compile_count->inc(); 2915 _total_standard_compile_count++; 2916 } 2917 } 2918 // set the current method for the thread to null 2919 if (UsePerfData) counters->set_current_method(""); 2920 } 2921 2922 const char* CompileBroker::compiler_name(int comp_level) { 2923 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 2924 if (comp == nullptr) { 2925 return "no compiler"; 2926 } else { 2927 return (comp->name()); 2928 } 2929 } 2930 2931 jlong CompileBroker::total_compilation_ticks() { 2932 return _perf_total_compilation != nullptr ? _perf_total_compilation->get_value() : 0; 2933 } 2934 2935 void CompileBroker::log_not_entrant(nmethod* nm) { 2936 _total_not_entrant_count++; 2937 if (CITime || log_is_enabled(Info, init)) { 2938 CompilerStatistics* stats = nullptr; 2939 int level = nm->comp_level(); 2940 if (nm->is_aot()) { 2941 if (nm->preloaded()) { 2942 assert(level == CompLevel_full_optimization, "%d", level); 2943 level = CompLevel_full_optimization + 1; 2944 } 2945 stats = &_aot_stats_per_level[level - 1]; 2946 } else { 2947 stats = &_stats_per_level[level - 1]; 2948 } 2949 stats->_made_not_entrant._count++; 2950 } 2951 } 2952 2953 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) { 2954 tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}", 2955 name, stats->bytes_per_second(), 2956 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, 2957 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, 2958 stats->_nmethods_size, stats->_nmethods_code_size); 2959 } 2960 2961 static void print_helper(outputStream* st, const char* name, CompilerStatistics::Data data, bool print_time = true) { 2962 if (data._count > 0) { 2963 st->print("; %s: %4u methods", name, data._count); 2964 if (print_time) { 2965 st->print(" (in %.3fs)", data._time.seconds()); 2966 } 2967 } 2968 } 2969 2970 static void print_tier_helper(outputStream* st, const char* prefix, int tier, CompilerStatistics* stats) { 2971 st->print(" %s%d: %5u methods", prefix, tier, stats->_standard._count); 2972 if (stats->_standard._count > 0) { 2973 st->print(" (in %.3fs)", stats->_standard._time.seconds()); 2974 } 2975 print_helper(st, "osr", stats->_osr); 2976 print_helper(st, "bailout", stats->_bailout); 2977 print_helper(st, "invalid", stats->_invalidated); 2978 print_helper(st, "not_entrant", stats->_made_not_entrant, false); 2979 st->cr(); 2980 } 2981 2982 static void print_queue_info(outputStream* st, CompileQueue* queue) { 2983 if (queue != nullptr) { 2984 MutexLocker ml(queue->lock()); 2985 2986 uint total_cnt = 0; 2987 uint 
active_cnt = 0; 2988 for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 2989 guarantee(jt != nullptr, ""); 2990 if (jt->is_Compiler_thread()) { 2991 CompilerThread* ct = (CompilerThread*)jt; 2992 2993 guarantee(ct != nullptr, ""); 2994 if (ct->queue() == queue) { 2995 ++total_cnt; 2996 CompileTask* task = ct->task(); 2997 if (task != nullptr) { 2998 ++active_cnt; 2999 } 3000 } 3001 } 3002 } 3003 3004 st->print(" %s (%d active / %d total threads): %u tasks", 3005 queue->name(), active_cnt, total_cnt, queue->size()); 3006 if (queue->size() > 0) { 3007 uint counts[] = {0, 0, 0, 0, 0}; // T1 ... T5 3008 for (CompileTask* task = queue->first(); task != nullptr; task = task->next()) { 3009 int tier = task->comp_level(); 3010 if (task->is_aot_load() && task->preload()) { 3011 assert(tier == CompLevel_full_optimization, "%d", tier); 3012 tier = CompLevel_full_optimization + 1; 3013 } 3014 counts[tier-1]++; 3015 } 3016 st->print(":"); 3017 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3018 uint cnt = counts[tier-1]; 3019 if (cnt > 0) { 3020 st->print(" T%d: %u tasks;", tier, cnt); 3021 } 3022 } 3023 } 3024 st->cr(); 3025 3026 // for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3027 // guarantee(jt != nullptr, ""); 3028 // if (jt->is_Compiler_thread()) { 3029 // CompilerThread* ct = (CompilerThread*)jt; 3030 // 3031 // guarantee(ct != nullptr, ""); 3032 // if (ct->queue() == queue) { 3033 // ResourceMark rm; 3034 // CompileTask* task = ct->task(); 3035 // st->print(" %s: ", ct->name_raw()); 3036 // if (task != nullptr) { 3037 // task->print(st, nullptr, true /*short_form*/, false /*cr*/); 3038 // } 3039 // st->cr(); 3040 // } 3041 // } 3042 // } 3043 } 3044 } 3045 void CompileBroker::print_statistics_on(outputStream* st) { 3046 st->print_cr(" Total: %u methods; %u bailouts, %u invalidated, %u non_entrant", 3047 _total_compile_count, _total_bailout_count, _total_invalidated_count, _total_not_entrant_count); 3048 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 3049 print_tier_helper(st, "Tier", tier, &_stats_per_level[tier-1]); 3050 } 3051 st->cr(); 3052 3053 if (AOTCodeCaching) { 3054 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3055 if (tier != CompLevel_full_profile) { 3056 print_tier_helper(st, "AOT Code T", tier, &_aot_stats_per_level[tier - 1]); 3057 } 3058 } 3059 st->cr(); 3060 } 3061 3062 print_queue_info(st, _c1_compile_queue); 3063 print_queue_info(st, _c2_compile_queue); 3064 print_queue_info(st, _ac1_compile_queue); 3065 print_queue_info(st, _ac2_compile_queue); 3066 } 3067 3068 void CompileBroker::print_times(bool per_compiler, bool aggregate) { 3069 if (per_compiler) { 3070 if (aggregate) { 3071 tty->cr(); 3072 tty->print_cr("[%dms] Individual compiler times (for compiled methods only)", (int)tty->time_stamp().milliseconds()); 3073 tty->print_cr("------------------------------------------------"); 3074 tty->cr(); 3075 } 3076 for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { 3077 AbstractCompiler* comp = _compilers[i]; 3078 if (comp != nullptr) { 3079 print_times(comp->name(), comp->stats()); 3080 } 3081 } 3082 if (_aot_stats._standard._count > 0) { 3083 print_times("SC", &_aot_stats); 3084 } 3085 if (aggregate) { 3086 tty->cr(); 3087 tty->print_cr("Individual compilation Tier times (for compiled methods only)"); 3088 
tty->print_cr("------------------------------------------------"); 3089 tty->cr(); 3090 } 3091 char tier_name[256]; 3092 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 3093 CompilerStatistics* stats = &_stats_per_level[tier-1]; 3094 os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier); 3095 print_times(tier_name, stats); 3096 } 3097 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3098 CompilerStatistics* stats = &_aot_stats_per_level[tier-1]; 3099 if (stats->_standard._bytes > 0) { 3100 os::snprintf_checked(tier_name, sizeof(tier_name), "AOT Code T%d", tier); 3101 print_times(tier_name, stats); 3102 } 3103 } 3104 } 3105 3106 if (!aggregate) { 3107 return; 3108 } 3109 3110 elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation; 3111 elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation; 3112 elapsedTimer total_compilation = CompileBroker::_t_total_compilation; 3113 3114 uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled; 3115 uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled; 3116 3117 uint standard_compile_count = CompileBroker::_total_standard_compile_count; 3118 uint osr_compile_count = CompileBroker::_total_osr_compile_count; 3119 uint total_compile_count = CompileBroker::_total_compile_count; 3120 uint total_bailout_count = CompileBroker::_total_bailout_count; 3121 uint total_invalidated_count = CompileBroker::_total_invalidated_count; 3122 3123 uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size; 3124 uint nmethods_size = CompileBroker::_sum_nmethod_size; 3125 3126 tty->cr(); 3127 tty->print_cr("Accumulated compiler times"); 3128 tty->print_cr("----------------------------------------------------------"); 3129 //0000000000111111111122222222223333333333444444444455555555556666666666 3130 //0123456789012345678901234567890123456789012345678901234567890123456789 3131 tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds()); 3132 tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s", 3133 standard_compilation.seconds(), 3134 standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count); 3135 tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s", 3136 CompileBroker::_t_bailedout_compilation.seconds(), 3137 total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count); 3138 tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s", 3139 osr_compilation.seconds(), 3140 osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count); 3141 tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s", 3142 CompileBroker::_t_invalidated_compilation.seconds(), 3143 total_invalidated_count == 0 ? 
                  0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count);

  if (AOTCodeCaching) { // Check flags because AOT code cache could be closed already
    tty->cr();
    AOTCodeCache::print_timers_on(tty);
  }
  AbstractCompiler *comp = compiler(CompLevel_simple);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = compiler(CompLevel_full_optimization);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCICompiler *jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null());
    if (jvmci_comp != nullptr && jvmci_comp != comp) {
      tty->cr();
      jvmci_comp->print_timers();
    }
  }
#endif

  tty->cr();
  tty->print_cr(" Total compiled methods : %8u methods", total_compile_count);
  tty->print_cr(" Standard compilation : %8u methods", standard_compile_count);
  tty->print_cr(" On stack replacement : %8u methods", osr_compile_count);
  uint tcb = osr_bytes_compiled + standard_bytes_compiled;
  tty->print_cr(" Total compiled bytecodes : %8u bytes", tcb);
  tty->print_cr(" Standard compilation : %8u bytes", standard_bytes_compiled);
  tty->print_cr(" On stack replacement : %8u bytes", osr_bytes_compiled);
  double tcs = total_compilation.seconds();
  uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs);
  tty->print_cr(" Average compilation speed : %8u bytes/s", bps);
  tty->cr();
  tty->print_cr(" nmethod code size : %8u bytes", nmethods_code_size);
  tty->print_cr(" nmethod total size : %8u bytes", nmethods_size);
}

// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
  if (out == nullptr) out = tty;
  out->cr();
  out->print_cr("======================");
  out->print_cr(" General JIT info ");
  out->print_cr("======================");
  out->cr();
  out->print_cr(" JIT is : %7s", should_compile_new_jobs() ? "on" : "off");
  out->print_cr(" Compiler threads : %7d", (int)CICompilerCount);
  out->cr();
  out->print_cr("CodeCache overview");
  out->print_cr("--------------------------------------------------------");
  out->cr();
  out->print_cr(" Reserved size : %7zu KB", CodeCache::max_capacity() / K);
  out->print_cr(" Committed size : %7zu KB", CodeCache::capacity() / K);
  out->print_cr(" Unallocated capacity : %7zu KB", CodeCache::unallocated_capacity() / K);
  out->cr();
}

// Note: tty_lock must not be held upon entry to this function.
// Print functions called from here do "micro-locking" on tty_lock.
// That's a tradeoff which keeps important blocks of output together.
// At the same time, continuous tty_lock hold time is kept in check,
// preventing concurrently printing threads from stalling for a long time.
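//
// Added summary (derived from the dispatch code below): the 'function' argument selects which
// analysis steps run. Recognized values are "all", "aggregate" (or "analyze"), "UsedSpace",
// "FreeSpace", "MethodCount", "MethodSpace", "MethodAge", "MethodNames", and "discard"; any
// other value prints an error message and returns early. The 'granularity' argument is
// forwarded to CodeCache::aggregate(), presumably controlling how coarsely the CodeHeap is
// binned during aggregation.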
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
  TimeStamp ts_total;
  TimeStamp ts_global;
  TimeStamp ts;

  bool allFun      = !strcmp(function, "all");
  bool aggregate   = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
  bool usedSpace   = !strcmp(function, "UsedSpace") || allFun;
  bool freeSpace   = !strcmp(function, "FreeSpace") || allFun;
  bool methodCount = !strcmp(function, "MethodCount") || allFun;
  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
  bool methodAge   = !strcmp(function, "MethodAge") || allFun;
  bool methodNames = !strcmp(function, "MethodNames") || allFun;
  bool discard     = !strcmp(function, "discard") || allFun;

  if (out == nullptr) {
    out = tty;
  }

  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
    out->cr();
    return;
  }

  ts_total.update(); // record starting point

  if (aggregate) {
    print_info(out);
  }

  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
  // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
  // When we request individual parts of the analysis via the jcmd interface, it is possible
  // that another thread (another jcmd user, or the VM running into CodeCache OOM) has updated
  // the aggregated data in the meantime. We will then see a modified, but again consistent, view
  // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
  // a lock across user interaction.

  // We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock.
  // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
  // leading to an unnecessarily long hold time of the other locks we acquired before.
  ts.update(); // record starting point
  MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
  out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());

  // Holding the CodeCache_lock protects from concurrent alterations of the CodeCache.
  // Unfortunately, such protection is not sufficient:
  // When a new nmethod is created via ciEnv::register_method(), the
  // Compile_lock is taken first. After some initializations,
  // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
  // immediately (after finalizing the oop references). To lock out concurrent
  // modifiers, we have to grab both locks as well in the described sequence.
  //
  // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
  // for the entire duration of aggregation and printing. That makes sure we see
  // a consistent picture and do not run into issues caused by concurrent alterations.
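  //
  // Added note (summary of the locking decision below): a lock is taken here only if we are
  // not at a safepoint and do not already own it. For an "all" request the Compile_lock /
  // CodeCache_lock pair is taken globally and held across aggregation and all printing;
  // for a single-function request it is taken only locally, around the aggregation step itself.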
  bool should_take_Compile_lock   = !SafepointSynchronize::is_at_safepoint() &&
                                    !Compile_lock->owned_by_self();
  bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
                                    !CodeCache_lock->owned_by_self();
  bool take_global_lock_1   =  allFun && should_take_Compile_lock;
  bool take_global_lock_2   =  allFun && should_take_CodeCache_lock;
  bool take_function_lock_1 = !allFun && should_take_Compile_lock;
  bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
  bool take_global_locks    = take_global_lock_1 || take_global_lock_2;
  bool take_function_locks  = take_function_lock_1 || take_function_lock_2;

  ts_global.update(); // record starting point

  ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
  ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
    ts_global.update(); // record starting point
  }

  if (aggregate) {
    ts.update(); // record starting point
    ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag);
    ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
    }

    ts.update(); // record starting point
    CodeCache::aggregate(out, granularity);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
    }
  }

  if (usedSpace) CodeCache::print_usedSpace(out);
  if (freeSpace) CodeCache::print_freeSpace(out);
  if (methodCount) CodeCache::print_count(out);
  if (methodSpace) CodeCache::print_space(out);
  if (methodAge) CodeCache::print_age(out);
  if (methodNames) {
    if (allFun) {
      // print_names() can only be used safely if the locks have been continuously held
      // since aggregation began. That is true only for function "all".
      CodeCache::print_names(out);
    } else {
      out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
    }
  }
  if (discard) CodeCache::discard(out);

  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
  }
  out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
}
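
// Added usage note (illustrative, not part of the original file): print_heapinfo() is typically
// reached through the CodeHeap analytics diagnostic command. Assuming the usual jcmd wiring
// (Compiler.CodeHeap_Analytics), an invocation might look like:
//
//   jcmd <pid> Compiler.CodeHeap_Analytics all 4096
//
// where "all" selects every analysis function handled above and the trailing number is the
// aggregation granularity. Exact command names and defaults may differ between JDK releases.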