/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLinkedClassBulkLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "code/SCCache.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerEvent.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "gc/shared/memAllocator.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm.h"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
#include "runtime/atomic.hpp"
#include "runtime/escapeBarrier.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "services/management.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/nonblockingQueue.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
"jvmci/jvmciEnv.hpp" 92 #include "jvmci/jvmciRuntime.hpp" 93 #endif 94 95 #ifdef DTRACE_ENABLED 96 97 // Only bother with this argument setup if dtrace is available 98 99 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ 100 { \ 101 Symbol* klass_name = (method)->klass_name(); \ 102 Symbol* name = (method)->name(); \ 103 Symbol* signature = (method)->signature(); \ 104 HOTSPOT_METHOD_COMPILE_BEGIN( \ 105 (char *) comp_name, strlen(comp_name), \ 106 (char *) klass_name->bytes(), klass_name->utf8_length(), \ 107 (char *) name->bytes(), name->utf8_length(), \ 108 (char *) signature->bytes(), signature->utf8_length()); \ 109 } 110 111 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ 112 { \ 113 Symbol* klass_name = (method)->klass_name(); \ 114 Symbol* name = (method)->name(); \ 115 Symbol* signature = (method)->signature(); \ 116 HOTSPOT_METHOD_COMPILE_END( \ 117 (char *) comp_name, strlen(comp_name), \ 118 (char *) klass_name->bytes(), klass_name->utf8_length(), \ 119 (char *) name->bytes(), name->utf8_length(), \ 120 (char *) signature->bytes(), signature->utf8_length(), (success)); \ 121 } 122 123 #else // ndef DTRACE_ENABLED 124 125 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) 126 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) 127 128 #endif // ndef DTRACE_ENABLED 129 130 bool CompileBroker::_initialized = false; 131 volatile bool CompileBroker::_should_block = false; 132 volatile int CompileBroker::_print_compilation_warning = 0; 133 volatile jint CompileBroker::_should_compile_new_jobs = run_compilation; 134 135 // The installed compiler(s) 136 AbstractCompiler* CompileBroker::_compilers[3]; 137 138 // The maximum numbers of compiler threads to be determined during startup. 139 int CompileBroker::_c1_count = 0; 140 int CompileBroker::_c2_count = 0; 141 int CompileBroker::_c3_count = 0; 142 int CompileBroker::_sc_count = 0; 143 144 // An array of compiler names as Java String objects 145 jobject* CompileBroker::_compiler1_objects = nullptr; 146 jobject* CompileBroker::_compiler2_objects = nullptr; 147 jobject* CompileBroker::_compiler3_objects = nullptr; 148 jobject* CompileBroker::_sc_objects = nullptr; 149 150 CompileLog** CompileBroker::_compiler1_logs = nullptr; 151 CompileLog** CompileBroker::_compiler2_logs = nullptr; 152 CompileLog** CompileBroker::_compiler3_logs = nullptr; 153 CompileLog** CompileBroker::_sc_logs = nullptr; 154 155 // These counters are used to assign an unique ID to each compilation. 
volatile jint CompileBroker::_compilation_id = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;
volatile jint CompileBroker::_native_compilation_id = 0;

// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = nullptr;
PerfCounter* CompileBroker::_perf_osr_compilation = nullptr;
PerfCounter* CompileBroker::_perf_standard_compilation = nullptr;

PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr;
PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr;
PerfCounter* CompileBroker::_perf_total_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr;

PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr;

PerfStringVariable* CompileBroker::_perf_last_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_type = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_size = nullptr;
PerfVariable* CompileBroker::_perf_last_failed_type = nullptr;
PerfVariable* CompileBroker::_perf_last_invalidated_type = nullptr;

// Timers and counters for generating statistics
elapsedTimer CompileBroker::_t_total_compilation;
elapsedTimer CompileBroker::_t_osr_compilation;
elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;

uint CompileBroker::_total_bailout_count = 0;
uint CompileBroker::_total_invalidated_count = 0;
uint CompileBroker::_total_not_entrant_count = 0;
uint CompileBroker::_total_compile_count = 0;
uint CompileBroker::_total_osr_compile_count = 0;
uint CompileBroker::_total_standard_compile_count = 0;
uint CompileBroker::_total_compiler_stopped_count = 0;
uint CompileBroker::_total_compiler_restarted_count = 0;

uint CompileBroker::_sum_osr_bytes_compiled = 0;
uint CompileBroker::_sum_standard_bytes_compiled = 0;
uint CompileBroker::_sum_nmethod_size = 0;
uint CompileBroker::_sum_nmethod_code_size = 0;

jlong CompileBroker::_peak_compilation_time = 0;

CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
CompilerStatistics CompileBroker::_scc_stats;
CompilerStatistics CompileBroker::_scc_stats_per_level[CompLevel_full_optimization + 1];

CompileQueue* CompileBroker::_c3_compile_queue = nullptr;
CompileQueue* CompileBroker::_c2_compile_queue = nullptr;
CompileQueue* CompileBroker::_c1_compile_queue = nullptr;
CompileQueue* CompileBroker::_sc1_compile_queue = nullptr;
CompileQueue* CompileBroker::_sc2_compile_queue = nullptr;

bool compileBroker_init() {
  if (LogEvents) {
    CompilationLog::init();
  }

  // init directives stack, adding default directive
  DirectivesStack::init();

  if (DirectivesParser::has_file()) {
    return DirectivesParser::parse_from_flag();
  } else if (CompilerDirectivesPrint) {
    // Print default directive even when no other was added
    DirectivesStack::print(tty);
  }

  return true;
}

CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
  CompilerThread* thread = CompilerThread::current();
  thread->set_task(task);
  CompileLog* log = thread->log();
  if (log != nullptr && !task->is_unloaded()) task->log_task_start(log);
}

CompileTaskWrapper::~CompileTaskWrapper() {
  CompilerThread* thread = CompilerThread::current();
  CompileTask* task = thread->task();
  CompileLog* log = thread->log();
  AbstractCompiler* comp = thread->compiler();
  if (log != nullptr && !task->is_unloaded()) task->log_task_done(log);
  thread->set_task(nullptr);
  thread->set_env(nullptr);
  if (task->is_blocking()) {
    bool free_task = false;
    {
      MutexLocker notifier(thread, task->lock());
      task->mark_complete();
#if INCLUDE_JVMCI
      if (comp->is_jvmci()) {
        if (!task->has_waiter()) {
          // The waiting thread timed out and thus did not free the task.
          free_task = true;
        }
        task->set_blocking_jvmci_compile_state(nullptr);
      }
#endif
      if (!free_task) {
        // Notify the waiting thread that the compilation has completed
        // so that it can free the task.
        task->lock()->notify_all();
      }
    }
    if (free_task) {
      // The task can only be freed once the task lock is released.
      CompileTask::free(task);
    }
  } else {
    task->mark_complete();

    // By convention, the compiling thread is responsible for
    // recycling a non-blocking CompileTask.
    CompileTask::free(task);
  }
}

/**
 * Check if a CompilerThread can be removed and update count if requested.
 */
bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
  assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
  if (!ReduceNumberOfCompilerThreads) return false;

  if (RecompilationPolicy::have_recompilation_work()) return false;

  AbstractCompiler *compiler = ct->compiler();
  int compiler_count = compiler->num_compiler_threads();
  bool c1 = compiler->is_c1();

  // Keep at least 1 compiler thread of each type.
  if (compiler_count < 2) return false;

  // Keep thread alive for at least some time.
  if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;

#if INCLUDE_JVMCI
  if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
    // Handles for JVMCI thread objects may get released concurrently.
    if (do_it) {
      assert(CompileThread_lock->owner() == ct, "must be holding lock");
    } else {
      // Skip check if it's the last thread and let caller check again.
      return true;
    }
  }
#endif

  // We only allow the last compiler thread of each type to get removed.
  jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
                             : compiler2_object(compiler_count - 1);
  if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
    if (do_it) {
      assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
      compiler->set_num_compiler_threads(compiler_count - 1);
#if INCLUDE_JVMCI
      if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
        // Old j.l.Thread object can die when no longer referenced elsewhere.
        JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
        _compiler2_objects[compiler_count - 1] = nullptr;
      }
#endif
    }
    return true;
  }
  return false;
}

/**
 * Add a CompileTask to a CompileQueue.
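 * The caller must already hold the queue's lock (asserted below); CompilerThreads
 * waiting on that lock are notified once the task is linked into the queue.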
 */
void CompileQueue::add(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");

  task->set_next(nullptr);
  task->set_prev(nullptr);

  if (_last == nullptr) {
    // The compile queue is empty.
    assert(_first == nullptr, "queue is empty");
    _first = task;
    _last = task;
  } else {
    // Append the task to the queue.
    assert(_last->next() == nullptr, "not last");
    _last->set_next(task);
    task->set_prev(_last);
    _last = task;
  }
  ++_size;
  ++_total_added;
  if (_size > _peak_size) {
    _peak_size = _size;
  }

  // Mark the method as being in the compile queue.
  task->method()->set_queued_for_compilation();

  task->mark_queued(os::elapsed_counter());

  if (CIPrintCompileQueue) {
    print_tty();
  }

  if (LogCompilation && xtty != nullptr) {
    task->log_task_queued();
  }

  if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) {
    CompileTrainingData* td = CompileTrainingData::make(task);
    if (td != nullptr) {
      task->set_training_data(td);
    }
  }

  // Notify CompilerThreads that a task is available.
  _lock->notify_all();
}

void CompileQueue::add_pending(CompileTask* task) {
  assert(_lock->owned_by_self() == false, "must NOT own lock");
  assert(UseLockFreeCompileQueues, "");
  task->method()->set_queued_for_compilation();
  _queue.push(*task);
  // FIXME: additional coordination needed? e.g., is it possible for compiler thread to block w/o processing pending tasks?
  if (is_empty()) {
    MutexLocker ml(_lock);
    _lock->notify_all();
  }
}

static bool process_pending(CompileTask* task) {
  // guarantee(task->method()->queued_for_compilation(), "");
  if (task->is_unloaded()) {
    return true; // unloaded
  }
  task->method()->set_queued_for_compilation(); // FIXME
  if (task->method()->pending_queue_processed()) {
    return true; // already queued
  }
  // Mark the method as being in the compile queue.
  task->method()->set_pending_queue_processed();
  if (CompileBroker::compilation_is_complete(task->method(), task->osr_bci(), task->comp_level(),
                                             task->requires_online_compilation(), task->compile_reason())) {
    return true; // already compiled
  }
  return false; // active
}

void CompileQueue::transfer_pending() {
  assert(_lock->owned_by_self(), "must own lock");

  CompileTask* task;
  while ((task = _queue.pop()) != nullptr) {
    bool is_stale = process_pending(task);
    if (is_stale) {
      task->set_next(_first_stale);
      task->set_prev(nullptr);
      _first_stale = task;
    } else {
      add(task);
    }
  }
}

/**
 * Empties compilation queue by putting all compilation tasks onto
 * a freelist. Furthermore, the method wakes up all threads that are
 * waiting on a compilation task to finish. This can happen if background
 * compilation is disabled.
 */
void CompileQueue::free_all() {
  MutexLocker mu(_lock);
  transfer_pending();

  CompileTask* next = _first;

  // Iterate over all tasks in the compile queue
  while (next != nullptr) {
    CompileTask* current = next;
    next = current->next();
    bool found_waiter = false;
    {
      MutexLocker ct_lock(current->lock());
      assert(current->waiting_for_completion_count() <= 1, "more than one thread are waiting for task");
      if (current->waiting_for_completion_count() > 0) {
        // If another thread waits for this task, we must wake them up
        // so they will stop waiting and free the task.
        current->lock()->notify();
        found_waiter = true;
      }
    }
    if (!found_waiter) {
      // If no one was waiting for this task, we need to free it ourselves. In this case, the task
      // is also certainly unlocked, because, again, there is no waiter.
      // Otherwise, by convention, it's the waiter's responsibility to free the task.
      // Put the task back on the freelist.
      CompileTask::free(current);
    }
  }
  _first = nullptr;
  _last = nullptr;

  // Wake up all threads that block on the queue.
  _lock->notify_all();
}

/**
 * Get the next CompileTask from a CompileQueue
 */
CompileTask* CompileQueue::get(CompilerThread* thread) {
  // save methods from RedefineClasses across safepoint
  // across compile queue lock below.
  methodHandle save_method;
  methodHandle save_hot_method;

  MonitorLocker locker(_lock);
  transfer_pending();

  RecompilationPolicy::sample_load_average();

  // If _first is null we have no more compile jobs. There are two reasons for
  // having no compile jobs: First, we compiled everything we wanted. Second,
  // we ran out of code cache so compilation has been disabled. In the latter
  // case we perform code cache sweeps to free memory such that we can re-enable
  // compilation.
  while (_first == nullptr) {
    // Exit loop if compilation is disabled forever
    if (CompileBroker::is_compilation_disabled_forever()) {
      return nullptr;
    }

    AbstractCompiler* compiler = thread->compiler();
    guarantee(compiler != nullptr, "Compiler object must exist");
    compiler->on_empty_queue(this, thread);
    if (_first != nullptr) {
      // The call to on_empty_queue may have temporarily unlocked the MCQ lock
      // so check again whether any tasks were added to the queue.
      break;
    }

    // If we have added stale tasks, there might be waiters that want
    // the notification these tasks have failed. Normally, this would
    // be done by a compiler thread that would perform the purge at
    // the end of some compilation. But, if compile queue is empty,
    // there is no guarantee compilers would run and do the purge.
    // Do the purge here and now to unblock the waiters.
    // Perform this until we run out of stale tasks.
    while (_first_stale != nullptr) {
      purge_stale_tasks();
    }
    if (_first != nullptr) {
      // Purge stale tasks may have transferred some new tasks,
      // so check again.
      break;
    }

    // If there are no compilation tasks and we can compile new jobs
    // (i.e., there is enough free space in the code cache) there is
    // no need to invoke the GC.
    // We need a timed wait here, since compiler threads can exit if compilation
    // is disabled forever.
    // We use 5 seconds wait time; the exiting of compiler threads
    // is not critical and we do not want idle compiler threads to wake up too often.
    locker.wait(5*1000);

    transfer_pending(); // reacquired lock

    if (RecompilationPolicy::have_recompilation_work()) return nullptr;

    if (UseDynamicNumberOfCompilerThreads && _first == nullptr) {
      // Still nothing to compile. Give caller a chance to stop this thread.
      if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr;
    }
  }

  if (CompileBroker::is_compilation_disabled_forever()) {
    return nullptr;
  }

  CompileTask* task;
  {
    NoSafepointVerifier nsv;
    task = CompilationPolicy::select_task(this, thread);
    if (task != nullptr) {
      task = task->select_for_compilation();
    }
  }

  if (task != nullptr) {
    // Save method pointers across unlock safepoint. The task is removed from
    // the compilation queue, which is walked during RedefineClasses.
    Thread* thread = Thread::current();
    save_method = methodHandle(thread, task->method());
    save_hot_method = methodHandle(thread, task->hot_method());

    remove(task);
  }
  purge_stale_tasks(); // may temporarily release MCQ lock
  return task;
}

// Clean & deallocate stale compile tasks.
// Temporarily releases MethodCompileQueue lock.
void CompileQueue::purge_stale_tasks() {
  assert(_lock->owned_by_self(), "must own lock");
  if (_first_stale != nullptr) {
    // Stale tasks are purged when MCQ lock is released,
    // but _first_stale updates are protected by MCQ lock.
    // Once task processing starts and MCQ lock is released,
    // other compiler threads can reuse _first_stale.
    CompileTask* head = _first_stale;
    _first_stale = nullptr;
    {
      MutexUnlocker ul(_lock);
      for (CompileTask* task = head; task != nullptr; ) {
        CompileTask* next_task = task->next();
        CompileTaskWrapper ctw(task); // Frees the task
        task->set_failure_reason("stale task");
        task = next_task;
      }
    }
    transfer_pending(); // transfer pending after reacquiring MCQ lock
  }
}

void CompileQueue::remove(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  if (task->prev() != nullptr) {
    task->prev()->set_next(task->next());
  } else {
    // max is the first element
    assert(task == _first, "Sanity");
    _first = task->next();
  }

  if (task->next() != nullptr) {
    task->next()->set_prev(task->prev());
  } else {
    // max is the last element
    assert(task == _last, "Sanity");
    _last = task->prev();
  }
  --_size;
  ++_total_removed;
}

void CompileQueue::remove_and_mark_stale(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  remove(task);

  // Enqueue the task for reclamation (should be done outside MCQ lock)
  task->set_next(_first_stale);
  task->set_prev(nullptr);
  _first_stale = task;
}

// methods in the compile queue need to be marked as used on the stack
// so that they don't get reclaimed by Redefine Classes
void CompileQueue::mark_on_stack() {
  for (CompileTask* task = _first; task != nullptr; task = task->next()) {
    task->mark_on_stack();
  }
  for (CompileTask* task = _queue.first(); !_queue.is_end(task); task = task->next()) {
    assert(task != nullptr, "");
    task->mark_on_stack();
  }
}

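// Map a compilation level to the compile queue serving it. Requests for cached
// (SC) code are routed to the dedicated SC queues when SC threads are configured.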
CompileQueue* CompileBroker::compile_queue(int comp_level, bool is_scc) {
  if (is_c2_compile(comp_level)) return ((is_scc && (_sc_count > 0)) ? _sc2_compile_queue : _c2_compile_queue);
  if (is_c1_compile(comp_level)) return ((is_scc && (_sc_count > 0)) ? _sc1_compile_queue : _c1_compile_queue);
  return nullptr;
}

CompileQueue* CompileBroker::c1_compile_queue() {
  return _c1_compile_queue;
}

CompileQueue* CompileBroker::c2_compile_queue() {
  return _c2_compile_queue;
}

void CompileBroker::print_compile_queues(outputStream* st) {
  st->print_cr("Current compiles: ");

  char buf[2000];
  int buflen = sizeof(buf);
  Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);

  st->cr();
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->print(st);
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->print(st);
  }
  if (_c3_compile_queue != nullptr) {
    _c3_compile_queue->print(st);
  }
  if (_sc1_compile_queue != nullptr) {
    _sc1_compile_queue->print(st);
  }
  if (_sc2_compile_queue != nullptr) {
    _sc2_compile_queue->print(st);
  }
}

void CompileQueue::print(outputStream* st) {
  assert_locked_or_safepoint(_lock);
  st->print_cr("%s:", name());
  CompileTask* task = _first;
  if (task == nullptr) {
    st->print_cr("Empty");
  } else {
    while (task != nullptr) {
      task->print(st, nullptr, true, true);
      task = task->next();
    }
  }
  st->cr();
}

void CompileQueue::print_tty() {
  stringStream ss;
  // Dump the compile queue into a buffer before locking the tty
  print(&ss);
  {
    ttyLocker ttyl;
    tty->print("%s", ss.freeze());
  }
}

CompilerCounters::CompilerCounters() {
  _current_method[0] = '\0';
  _compile_type = CompileBroker::no_compile;
}

#if INCLUDE_JFR && COMPILER2_OR_JVMCI
// It appends new compiler phase names to growable array phase_names (a new CompilerPhaseType mapping
// in compiler/compilerEvent.cpp) and registers it with its serializer.
//
// c2 uses explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if c2 is used, it should always be registered first.
// This function is called during vm initialization.
static void register_jfr_phasetype_serializer(CompilerType compiler_type) {
  ResourceMark rm;
  static bool first_registration = true;
  if (compiler_type == compiler_jvmci) {
    CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false);
    first_registration = false;
#ifdef COMPILER2
  } else if (compiler_type == compiler_c2) {
    assert(first_registration, "invariant"); // c2 must be registered first.
    for (int i = 0; i < PHASE_NUM_TYPES; i++) {
      const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i);
      CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false);
    }
    first_registration = false;
#endif // COMPILER2
  }
}
#endif // INCLUDE_JFR && COMPILER2_OR_JVMCI

// ------------------------------------------------------------------
// CompileBroker::compilation_init
//
// Initialize the Compilation object
void CompileBroker::compilation_init(JavaThread* THREAD) {
  // No need to initialize compilation system if we do not use it.
  if (!UseCompiler) {
    return;
  }
  // Set the interface to the current compiler(s).
  _c1_count = CompilationPolicy::c1_count();
  _c2_count = CompilationPolicy::c2_count();
  _c3_count = CompilationPolicy::c3_count();
  _sc_count = CompilationPolicy::sc_count();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // This is creating a JVMCICompiler singleton.
    JVMCICompiler* jvmci = new JVMCICompiler();

    if (UseJVMCICompiler) {
      _compilers[1] = jvmci;
      if (FLAG_IS_DEFAULT(JVMCIThreads)) {
        if (BootstrapJVMCI) {
          // JVMCI will bootstrap so give it more threads
          _c2_count = MIN2(32, os::active_processor_count());
        }
      } else {
        _c2_count = JVMCIThreads;
      }
      if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
      } else {
#ifdef COMPILER1
        _c1_count = JVMCIHostThreads;
#endif // COMPILER1
      }
#ifdef COMPILER2
      if (SCCache::is_on() && (_c3_count > 0)) {
        _compilers[2] = new C2Compiler();
      }
#endif
    }
  }
#endif // INCLUDE_JVMCI

#ifdef COMPILER1
  if (_c1_count > 0) {
    _compilers[0] = new Compiler();
  }
#endif // COMPILER1

#ifdef COMPILER2
  if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
    if (_c2_count > 0) {
      _compilers[1] = new C2Compiler();
      // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit.
      // idToPhase mapping for c2 is in opto/phasetype.hpp
      JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);)
    }
  }
#endif // COMPILER2

#if INCLUDE_JVMCI
  // Register after c2 registration.
  // JVMCI CompilerPhaseType idToPhase mapping is dynamic.
  if (EnableJVMCI) {
    JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);)
  }
#endif // INCLUDE_JVMCI

  if (CompilerOracle::should_collect_memstat()) {
    CompilationMemoryStatistic::initialize();
  }

  // Start the compiler thread(s)
  init_compiler_threads();
  // totalTime performance counter is always created as it is required
  // by the implementation of java.lang.management.CompilationMXBean.
  {
    // Ensure OOM leads to vm_exit_during_initialization.
    EXCEPTION_MARK;
    _perf_total_compilation =
        PerfDataManager::create_counter(JAVA_CI, "totalTime",
                                        PerfData::U_Ticks, CHECK);
  }

  if (UsePerfData) {

    EXCEPTION_MARK;

    // create the jvmstat performance counters
    _perf_osr_compilation =
        PerfDataManager::create_counter(SUN_CI, "osrTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_standard_compilation =
        PerfDataManager::create_counter(SUN_CI, "standardTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_total_bailout_count =
        PerfDataManager::create_counter(SUN_CI, "totalBailouts",
                                        PerfData::U_Events, CHECK);

    _perf_total_invalidated_count =
        PerfDataManager::create_counter(SUN_CI, "totalInvalidates",
                                        PerfData::U_Events, CHECK);

    _perf_total_compile_count =
        PerfDataManager::create_counter(SUN_CI, "totalCompiles",
                                        PerfData::U_Events, CHECK);
    _perf_total_osr_compile_count =
        PerfDataManager::create_counter(SUN_CI, "osrCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_total_standard_compile_count =
        PerfDataManager::create_counter(SUN_CI, "standardCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_sum_osr_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "osrBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_standard_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "standardBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_code_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_last_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_failed_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_invalidated_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_compile_type =
        PerfDataManager::create_variable(SUN_CI, "lastType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_compile_size =
        PerfDataManager::create_variable(SUN_CI, "lastSize",
                                         PerfData::U_Bytes,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);


    _perf_last_failed_type =
        PerfDataManager::create_variable(SUN_CI, "lastFailedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_invalidated_type =
        PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);
  }

  log_info(scc, init)("CompileBroker is initialized");
  _initialized = true;
}

Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
  Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK_NH);
  return thread_oop;
}

void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) {
  CompilationPolicy::replay_training_at_init_loop(thread);
}

#if defined(ASSERT) && COMPILER2_OR_JVMCI
// Entry for DeoptimizeObjectsALotThread. The threads are started in
// CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled
void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
  DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread);
  bool enter_single_loop;
  {
    MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag);
    static int single_thread_count = 0;
    enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle;
  }
  if (enter_single_loop) {
    dt->deoptimize_objects_alot_loop_single();
  } else {
    dt->deoptimize_objects_alot_loop_all();
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets a single thread which is selected round robin.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() {
  HandleMark hm(this);
  while (true) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) {
      { // Begin new scope for escape barrier
        HandleMarkCleaner hmc(this);
        ResourceMark rm(this);
        EscapeBarrier eb(true, this, deoptee_thread);
        eb.deoptimize_objects(100);
      }
      // Now sleep after the escape barrier's destructor resumed deoptee_thread.
      sleep(DeoptimizeObjectsALotInterval);
    }
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets all java threads in the vm at once.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {
  HandleMark hm(this);
  while (true) {
    { // Begin new scope for escape barrier
      HandleMarkCleaner hmc(this);
      ResourceMark rm(this);
      EscapeBarrier eb(true, this);
      eb.deoptimize_objects_all_threads();
    }
    // Now sleep after the escape barrier's destructor resumed the java threads.
    sleep(DeoptimizeObjectsALotInterval);
  }
}
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI


JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
  Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));

  if (java_lang_Thread::thread(thread_oop()) != nullptr) {
    assert(type == compiler_t, "should only happen with reused compiler threads");
    // The compiler thread hasn't actually exited yet so don't try to reuse it
    return nullptr;
  }

  JavaThread* new_thread = nullptr;
  switch (type) {
    case compiler_t:
      assert(comp != nullptr, "Compiler instance missing.");
      if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
        CompilerCounters* counters = new CompilerCounters();
        new_thread = new CompilerThread(queue, counters);
      }
      break;
#if defined(ASSERT) && COMPILER2_OR_JVMCI
    case deoptimizer_t:
      new_thread = new DeoptimizeObjectsALotThread();
      break;
#endif // ASSERT
    case training_replay_t:
      new_thread = new TrainingReplayThread();
      break;
    default:
      ShouldNotReachHere();
  }

  // At this point the new CompilerThread data-races with this startup
  // thread (which is the main thread and NOT the VM thread).
  // This means Java bytecodes being executed at startup can
  // queue compile jobs which will run at whatever default priority the
  // newly created CompilerThread runs at.

  // At this point it may be possible that no osthread was created for the
  // JavaThread due to lack of resources. We will handle that failure below.
  // Also check new_thread so that static analysis is happy.
  if (new_thread != nullptr && new_thread->osthread() != nullptr) {

    if (type == compiler_t) {
      CompilerThread::cast(new_thread)->set_compiler(comp);
    }

    // Note that we cannot call os::set_priority because it expects Java
    // priorities and we are *explicitly* using OS priorities so that it's
    // possible to set the compiler thread priority higher than any Java
    // thread.

    int native_prio = CompilerThreadPriority;
    if (native_prio == -1) {
      if (UseCriticalCompilerThreadPriority) {
        native_prio = os::java_to_os_priority[CriticalPriority];
      } else {
        native_prio = os::java_to_os_priority[NearMaxPriority];
      }
    }
    os::set_native_priority(new_thread, native_prio);

    // Note that this only sets the JavaThread _priority field, which by
    // definition is limited to Java priorities and not OS priorities.
    JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority);

  } else { // osthread initialization failure
    if (UseDynamicNumberOfCompilerThreads && type == compiler_t
        && comp->num_compiler_threads() > 0) {
      // The new thread is not known to Thread-SMR yet so we can just delete.
      delete new_thread;
      return nullptr;
    } else {
      vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                    os::native_thread_creation_failed_msg());
    }
  }

  os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)

  return new_thread;
}

static bool trace_compiler_threads() {
  LogTarget(Debug, jit, thread) lt;
  return TraceCompilerThreads || lt.is_enabled();
}

static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) {
  char name_buffer[256];
  os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i);
  Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL);
  return JNIHandles::make_global(thread_oop);
}

static void print_compiler_threads(stringStream& msg) {
  if (TraceCompilerThreads) {
    tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string());
  }
  LogTarget(Debug, jit, thread) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s", msg.as_string());
  }
}

static void print_compiler_thread(JavaThread *ct) {
  if (trace_compiler_threads()) {
    ResourceMark rm;
    ThreadsListHandle tlh; // name() depends on the TLH.
    assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
    stringStream msg;
    msg.print("Added initial compiler thread %s", ct->name());
    print_compiler_threads(msg);
  }
}

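// Set up the compile queues and compiler thread objects for each configured compiler.
// With UseDynamicNumberOfCompilerThreads only the first C1/C2 thread is started here;
// additional threads are started on demand by possibly_add_compiler_threads().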
"JVMCI compile queue" :) "C2 compile queue"; 1097 _c2_compile_queue = new CompileQueue(name, MethodCompileQueueC2_lock); 1098 _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler); 1099 _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler); 1100 } 1101 if (_c1_count > 0) { 1102 _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueueC1_lock); 1103 _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler); 1104 _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler); 1105 } 1106 1107 if (_c3_count > 0) { 1108 const char* name = "C2 compile queue"; 1109 _c3_compile_queue = new CompileQueue(name, MethodCompileQueueC3_lock); 1110 _compiler3_objects = NEW_C_HEAP_ARRAY(jobject, _c3_count, mtCompiler); 1111 _compiler3_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c3_count, mtCompiler); 1112 } 1113 if (_sc_count > 0) { 1114 if (_c1_count > 0) { // C1 is present 1115 _sc1_compile_queue = new CompileQueue("C1 SC compile queue", MethodCompileQueueSC1_lock); 1116 } 1117 if (_c2_count > 0) { // C2 is present 1118 _sc2_compile_queue = new CompileQueue("C2 SC compile queue", MethodCompileQueueSC2_lock); 1119 } 1120 _sc_objects = NEW_C_HEAP_ARRAY(jobject, _sc_count, mtCompiler); 1121 _sc_logs = NEW_C_HEAP_ARRAY(CompileLog*, _sc_count, mtCompiler); 1122 } 1123 char name_buffer[256]; 1124 1125 for (int i = 0; i < _c2_count; i++) { 1126 // Create a name for our thread. 1127 jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK); 1128 _compiler2_objects[i] = thread_handle; 1129 _compiler2_logs[i] = nullptr; 1130 1131 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1132 JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD); 1133 assert(ct != nullptr, "should have been handled for initial thread"); 1134 _compilers[1]->set_num_compiler_threads(i + 1); 1135 print_compiler_thread(ct); 1136 } 1137 } 1138 1139 for (int i = 0; i < _c1_count; i++) { 1140 // Create a name for our thread. 1141 jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK); 1142 _compiler1_objects[i] = thread_handle; 1143 _compiler1_logs[i] = nullptr; 1144 1145 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1146 JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD); 1147 assert(ct != nullptr, "should have been handled for initial thread"); 1148 _compilers[0]->set_num_compiler_threads(i + 1); 1149 print_compiler_thread(ct); 1150 } 1151 } 1152 1153 for (int i = 0; i < _c3_count; i++) { 1154 // Create a name for our thread. 
    os::snprintf_checked(name_buffer, sizeof(name_buffer), "C2 CompilerThread%d", i);
    Handle thread_oop = create_thread_oop(name_buffer, CHECK);
    jobject thread_handle = JNIHandles::make_global(thread_oop);
    _compiler3_objects[i] = thread_handle;
    _compiler3_logs[i] = nullptr;

    JavaThread *ct = make_thread(compiler_t, thread_handle, _c3_compile_queue, _compilers[2], THREAD);
    assert(ct != nullptr, "should have been handled for initial thread");
    _compilers[2]->set_num_compiler_threads(i + 1);
    print_compiler_thread(ct);
  }

  if (_sc_count > 0) {
    int i = 0;
    if (_c1_count > 0) { // C1 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d SC CompilerThread", 1);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _sc_objects[i] = thread_handle;
      _sc_logs[i] = nullptr;
      i++;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _sc1_compile_queue, _compilers[0], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
    if (_c2_count > 0) { // C2 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d SC CompilerThread", 2);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _sc_objects[i] = thread_handle;
      _sc_logs[i] = nullptr;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _sc2_compile_queue, _compilers[1], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
  }

  if (UsePerfData) {
    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count + _c3_count, CHECK);
  }

#if defined(ASSERT) && COMPILER2_OR_JVMCI
  if (DeoptimizeObjectsALot) {
    // Initialize and start the object deoptimizer threads
    const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll;
    for (int count = 0; count < total_count; count++) {
      Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK);
      jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
      make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD);
    }
  }
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI
}

void CompileBroker::init_training_replay() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
  if (TrainingData::have_data()) {
    Handle thread_oop = create_thread_oop("Training replay thread", CHECK);
    jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
    make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD);
  }
}

void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {

  int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0;
  const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4;

  // Quick check if we already have enough compiler threads without taking the lock.
  // Numbers may change concurrently, so we read them again after we have the lock.
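  // Heuristic: aim for roughly one C2 thread per two queued C2 tasks and one C1
  // thread per four queued C1 tasks, never exceeding the configured counts.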
  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread);
  }
  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread);
  }
  if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return;

  // Now, we do the more expensive operations.
  julong free_memory = os::free_memory();
  // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
  size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
         available_cc_p  = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);

  // Only attempt to start additional threads if the lock is free.
  if (!CompileThread_lock->try_lock()) return;

  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN4(_c2_count,
                        _c2_compile_queue->size() / c2_tasks_per_thread,
                        (int)(free_memory / (200*M)),
                        (int)(available_cc_np / (128*K)));

    for (int i = old_c2_count; i < new_c2_count; i++) {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) {
        // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their
        // existence is completely hidden from the rest of the VM (and those compiler threads can't
        // call Java code to do the creation anyway).
        //
        // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we
        // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads. For
        // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary
        // coupling with Java.
        if (!THREAD->can_call_java()) break;
        char name_buffer[256];
        os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i);
        Handle thread_oop;
        {
          // We have to give up the lock temporarily for the Java calls.
          MutexUnlocker mu(CompileThread_lock);
          thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD);
        }
        if (HAS_PENDING_EXCEPTION) {
          if (trace_compiler_threads()) {
            ResourceMark rm;
            stringStream msg;
            msg.print_cr("JVMCI compiler thread creation failed:");
            PENDING_EXCEPTION->print_on(&msg);
            print_compiler_threads(msg);
          }
          CLEAR_PENDING_EXCEPTION;
          break;
        }
        // Check if another thread has beaten us during the Java calls.
        if (get_c2_thread_count() != i) break;
        jobject thread_handle = JNIHandles::make_global(thread_oop);
        assert(compiler2_object(i) == nullptr, "Old one must be released!");
        _compiler2_objects[i] = thread_handle;
      }
#endif
      guarantee(compiler2_object(i) != nullptr, "Thread oop must exist");
      JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD);
      if (ct == nullptr) break;
      _compilers[1]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh; // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_np/M));
        print_compiler_threads(msg);
      }
    }
  }

  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN4(_c1_count,
                        _c1_compile_queue->size() / c1_tasks_per_thread,
                        (int)(free_memory / (100*M)),
                        (int)(available_cc_p / (128*K)));

    for (int i = old_c1_count; i < new_c1_count; i++) {
      JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD);
      if (ct == nullptr) break;
      _compilers[0]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh; // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_p/M));
        print_compiler_threads(msg);
      }
    }
  }

  CompileThread_lock->unlock();
}


/**
 * Set the methods on the stack as on_stack so that redefine classes doesn't
 * reclaim them. This method is executed at a safepoint.
 */
void CompileBroker::mark_on_stack() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
  // Since we are at a safepoint, we do not need a lock to access
  // the compile queues.
  if (_c3_compile_queue != nullptr) {
    _c3_compile_queue->mark_on_stack();
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->mark_on_stack();
  }
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->mark_on_stack();
  }
  if (_sc1_compile_queue != nullptr) {
    _sc1_compile_queue->mark_on_stack();
  }
  if (_sc2_compile_queue != nullptr) {
    _sc2_compile_queue->mark_on_stack();
  }
}

// ------------------------------------------------------------------
// CompileBroker::compile_method
//
// Request compilation of a method.
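// The request is placed on the queue matching the requested compilation level;
// when 'blocking' is true the caller waits for the compilation to finish.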
void CompileBroker::compile_method_base(const methodHandle& method,
                                        int osr_bci,
                                        int comp_level,
                                        const methodHandle& hot_method,
                                        int hot_count,
                                        CompileTask::CompileReason compile_reason,
                                        bool requires_online_compilation,
                                        bool blocking,
                                        Thread* thread) {
  guarantee(!method->is_abstract(), "cannot compile abstract methods");
  assert(method->method_holder()->is_instance_klass(),
         "sanity check");
  assert(!method->method_holder()->is_not_initialized() ||
         compile_reason == CompileTask::Reason_Preload ||
         compile_reason == CompileTask::Reason_Precompile ||
         compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
  assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");

  if (CIPrintRequests) {
    tty->print("request: ");
    method->print_short_name(tty);
    if (osr_bci != InvocationEntryBci) {
      tty->print(" osr_bci: %d", osr_bci);
    }
    tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count);
    if (!hot_method.is_null()) {
      tty->print(" hot: ");
      if (hot_method() != method()) {
        hot_method->print_short_name(tty);
      } else {
        tty->print("yes");
      }
    }
    tty->cr();
  }

  // A request has been made for compilation. Before we do any
  // real work, check to see if the method has been compiled
  // in the meantime with a definitive result.
  if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
    return;
  }

#ifndef PRODUCT
  if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) {
    if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) {
      // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI.
      return;
    }
  }
#endif

  // If this method is already in the compile queue, then
  // we do not block the current thread.
  if (compilation_is_in_queue(method)) {
    // We may want to decay our counter a bit here to prevent
    // multiple denied requests for compilation. This is an
    // open compilation policy issue. Note: The other possibility,
    // in the case that this is a blocking compile request, is to have
    // all subsequent blocking requesters wait for completion of
    // ongoing compiles. Note that in this case we'll need a protocol
    // for freeing the associated compile tasks. [Or we could have
    // a single static monitor on which all these waiters sleep.]
    return;
  }

  // Tiered policy requires MethodCounters to exist before adding a method to
  // the queue. Create if we don't have them yet.
  if (compile_reason != CompileTask::Reason_Preload) {
    method->get_method_counters(thread);
  }

  SCCEntry* scc_entry = find_scc_entry(method, osr_bci, comp_level, compile_reason, requires_online_compilation);
  bool is_scc = (scc_entry != nullptr);

  // Outputs from the following MutexLocker block:
  CompileTask* task = nullptr;
  CompileQueue* queue;
#if INCLUDE_JVMCI
  if (is_c2_compile(comp_level) && compiler2()->is_jvmci() && compiler3() != nullptr &&
      ((JVMCICompiler*)compiler2())->force_comp_at_level_simple(method)) {
    assert(_c3_compile_queue != nullptr, "sanity");
    queue = _c3_compile_queue; // JVMCI compiler's methods compilation
  } else
#endif
  queue = compile_queue(comp_level, is_scc);

  // Acquire our lock.
  {
    ConditionalMutexLocker locker(thread, queue->lock(), !UseLockFreeCompileQueues);

    // Make sure the method has not slipped into the queues since
    // last we checked; note that those checks were "fast bail-outs".
    // Here we need to be more careful, see 14012000 below.
    if (compilation_is_in_queue(method)) {
      return;
    }

    // We need to check again to see if the compilation has
    // completed. A previous compilation may have registered
    // some result.
    if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
      return;
    }

    // We now know that this compilation is not pending, complete,
    // or prohibited. Assign a compile_id to this compilation
    // and check to see if it is in our [Start..Stop) range.
    int compile_id = assign_compile_id(method, osr_bci);
    if (compile_id == 0) {
      // The compilation falls outside the allowed range.
      return;
    }

#if INCLUDE_JVMCI
    if (UseJVMCICompiler && blocking) {
      // Don't allow blocking compiles for requests triggered by JVMCI.
      if (thread->is_Compiler_thread()) {
        blocking = false;
      }

      // In libjvmci, JVMCI initialization should not deadlock with other threads
      if (!UseJVMCINativeLibrary) {
        // Don't allow blocking compiles if inside a class initializer or while performing class loading
        vframeStream vfst(JavaThread::cast(thread));
        for (; !vfst.at_end(); vfst.next()) {
          if (vfst.method()->is_static_initializer() ||
              (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) &&
               vfst.method()->name() == vmSymbols::loadClass_name())) {
            blocking = false;
            break;
          }
        }

        // Don't allow blocking compilation requests to JVMCI
        // if JVMCI itself is not yet initialized
        if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
          blocking = false;
        }
      }

      // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
      // to avoid deadlock between compiler thread(s) and threads run at shutdown
      // such as the DestroyJavaVM thread.
      if (JVMCI::in_shutdown()) {
        blocking = false;
      }
    }
#endif // INCLUDE_JVMCI

    // We will enter the compilation in the queue.
    // 14012000: Note that this sets the queued_for_compile bits in
    // the target method. We can now reason that a method cannot be
    // queued for compilation more than once, as follows:
    // Before a thread queues a task for compilation, it first acquires
    // the compile queue lock, then checks if the method's queued bits
    // are set or it has already been compiled. Thus there can not be two
    // instances of a compilation task for the same method on the
    // compilation queue. Consider now the case where the compilation
    // thread has already removed a task for that method from the queue
    // and is in the midst of compiling it. In this case, the
    // queued_for_compile bits must be set in the method (and these
    // will be visible to the current thread, since the bits were set
    // under protection of the compile queue lock, which we hold now).
    // When the compilation completes, the compiler thread first sets
    // the compilation result and then clears the queued_for_compile
    // bits. Neither of these actions are protected by a barrier (or done
    // under the protection of a lock), so the only guarantee we have
    // (on machines with TSO (Total Store Order)) is that these values
    // will update in that order. As a result, the only combinations of
    // these bits that the current thread will see are, in temporal order:
    // <RESULT, QUEUE> :
    //     <0, 1> : in compile queue, but not yet compiled
    //     <1, 1> : compiled but queue bit not cleared
    //     <1, 0> : compiled and queue bit cleared
    // Because we first check the queue bits then check the result bits,
    // we are assured that we cannot introduce a duplicate task.
    // Note that if we did the tests in the reverse order (i.e. check
    // result then check queued bit), we could get the result bit before
    // the compilation completed, and the queue bit after the compilation
    // completed, and end up introducing a "duplicate" (redundant) task.
    // In that case, the compiler thread should first check if a method
    // has already been compiled before trying to compile it.
    // NOTE: in the event that there are multiple compiler threads and
    // there is de-optimization/recompilation, things will get hairy,
    // and in that case it's best to protect both the testing (here) of
    // these bits, and their updating (here and elsewhere) under a
    // common lock.
    task = create_compile_task(queue,
                               compile_id, method,
                               osr_bci, comp_level,
                               hot_method, hot_count, scc_entry, compile_reason,
                               requires_online_compilation, blocking);

    if (task->is_scc() && (_sc_count > 0)) {
      // Put it on SC queue
      queue = is_c1_compile(comp_level) ? _sc1_compile_queue : _sc2_compile_queue;
    }

    if (UseLockFreeCompileQueues) {
      assert(queue->lock()->owned_by_self() == false, "");
      queue->add_pending(task);
    } else {
      queue->add(task);
    }
  }

  if (blocking) {
    wait_for_completion(task);
  }
}

SCCEntry* CompileBroker::find_scc_entry(const methodHandle& method, int osr_bci, int comp_level,
                                        CompileTask::CompileReason compile_reason,
                                        bool requires_online_compilation) {
  SCCEntry* scc_entry = nullptr;
  if (osr_bci == InvocationEntryBci && !requires_online_compilation && SCCache::is_on_for_read()) {
    // Check for cached code.
    if (compile_reason == CompileTask::Reason_Preload) {
      scc_entry = method->scc_entry();
      assert(scc_entry != nullptr && scc_entry->for_preload(), "sanity");
    } else {
      scc_entry = SCCache::find_code_entry(method, comp_level);
    }
  }
  return scc_entry;
}

nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                       int comp_level,
                                       const methodHandle& hot_method, int hot_count,
                                       bool requires_online_compilation,
                                       CompileTask::CompileReason compile_reason,
                                       TRAPS) {
  // Do nothing if compilebroker is not initialized or compiles are submitted on level none
  if (!_initialized || comp_level == CompLevel_none) {
    return nullptr;
  }

#if INCLUDE_JVMCI
  if (EnableJVMCI && UseJVMCICompiler &&
      comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
    return nullptr;
  }
#endif

  AbstractCompiler *comp = CompileBroker::compiler(comp_level);
  assert(comp != nullptr, "Ensure we have a compiler");

#if INCLUDE_JVMCI
  if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) {
    // JVMCI compilation is not yet initializable.
    return nullptr;
  }
#endif

  DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
  // CompileBroker::compile_method can trap and can have pending async exception.
  nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_method, hot_count, requires_online_compilation, compile_reason, directive, THREAD);
  DirectivesStack::release(directive);
  return nm;
}

nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                       int comp_level,
                                       const methodHandle& hot_method, int hot_count,
                                       bool requires_online_compilation,
                                       CompileTask::CompileReason compile_reason,
                                       DirectiveSet* directive,
                                       TRAPS) {

  // make sure arguments make sense
  assert(method->method_holder()->is_instance_klass(), "not an instance method");
  assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
  assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
  assert(!method->method_holder()->is_not_initialized() ||
         compile_reason == CompileTask::Reason_Preload ||
         compile_reason == CompileTask::Reason_Precompile ||
         compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
  // return quickly if possible

  if (PrecompileOnlyAndExit && !CompileTask::reason_is_precompiled(compile_reason)) {
    return nullptr;
  }

  // lock, make sure that the compilation
  // isn't prohibited in a straightforward way.
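  // For example, a method excluded from compilation on the command line, e.g. with
  //   -XX:CompileCommand=exclude,java.lang.String::indexOf
  // or through a compiler directives file, is rejected by compilation_is_prohibited()
  // below and never makes it onto a compile queue.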
  AbstractCompiler* comp = CompileBroker::compiler(comp_level);
  if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
    return nullptr;
  }

  if (osr_bci == InvocationEntryBci) {
    // standard compilation
    nmethod* method_code = method->code();
    if (method_code != nullptr) {
      if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
        return method_code;
      }
    }
    if (method->is_not_compilable(comp_level)) {
      return nullptr;
    }
  } else {
    // osr compilation
    // We accept a higher level osr method
    nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
    if (nm != nullptr) return nm;
    if (method->is_not_osr_compilable(comp_level)) return nullptr;
  }

  assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
  // some prerequisites that are compiler specific
  if (compile_reason != CompileTask::Reason_Preload && (comp->is_c2() || comp->is_jvmci())) {
    InternalOOMEMark iom(THREAD);
    method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL);
    // Resolve all classes seen in the signature of the method
    // we are compiling.
    Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL);
  }

  // If the method is native, do the lookup in the thread requesting
  // the compilation. Native lookups can load code, which is not
  // permitted during compilation.
  //
  // Note: A native method implies non-osr compilation which is
  // checked with an assertion at the entry of this method.
  if (method->is_native() && !method->is_method_handle_intrinsic()) {
    address adr = NativeLookup::lookup(method, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // In case of an exception looking up the method, we just forget
      // about it. The interpreter will kick in and throw the exception.
      method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable()
      CLEAR_PENDING_EXCEPTION;
      return nullptr;
    }
    assert(method->has_native_function(), "must have native code by now");
  }

  // RedefineClasses() has replaced this method; just return
  if (method->is_old()) {
    return nullptr;
  }

  // JVMTI -- post_compile_event requires jmethod_id() that may require
  // a lock the compiling thread cannot acquire. Prefetch it here.
  if (JvmtiExport::should_post_compiled_method_load()) {
    method->jmethod_id();
  }

  // do the compilation
  if (method->is_native()) {
    if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
#if defined(IA32) && !defined(ZERO)
      // The following native methods:
      //
      // java.lang.Float.intBitsToFloat
      // java.lang.Float.floatToRawIntBits
      // java.lang.Double.longBitsToDouble
      // java.lang.Double.doubleToRawLongBits
      //
      // are called through the interpreter even if interpreter native stubs
      // are not preferred (i.e., calling through adapter handlers is preferred).
      // The reason is that on x86_32 signaling NaNs (sNaNs) are not preserved
      // if the version of the methods from the native libraries is called.
      // Since the interpreter and the C2-intrinsified versions of these methods
      // preserve sNaNs, calling the library versions would result in inconsistent
      // handling of sNaNs.
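      // (Illustrative example, not specific to this file: 0x7f800001 encodes a
      //  float sNaN; a round trip through the x87 register stack typically comes
      //  back quieted as 0x7fc00001, while the interpreter and the C2 intrinsic
      //  return the bit pattern unchanged.)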
      if ((UseSSE >= 1 &&
           (method->intrinsic_id() == vmIntrinsics::_intBitsToFloat ||
            method->intrinsic_id() == vmIntrinsics::_floatToRawIntBits)) ||
          (UseSSE >= 2 &&
           (method->intrinsic_id() == vmIntrinsics::_longBitsToDouble ||
            method->intrinsic_id() == vmIntrinsics::_doubleToRawLongBits))) {
        return nullptr;
      }
#endif // IA32 && !ZERO

      // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
      // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
      //
      // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
      // in this case. If we can't generate one and use it we cannot execute the out-of-line method handle calls.
      AdapterHandlerLibrary::create_native_wrapper(method);
    } else {
      return nullptr;
    }
  } else {
    // If the compiler is shut off due to the code cache getting full,
    // fail out now so blocking compiles don't hang the Java thread.
    if (!should_compile_new_jobs()) {
      return nullptr;
    }
    bool is_blocking = ReplayCompiles ||
                       !directive->BackgroundCompilationOption ||
                       (PreloadBlocking && (compile_reason == CompileTask::Reason_Preload)) ||
                       (compile_reason == CompileTask::Reason_Precompile) ||
                       (compile_reason == CompileTask::Reason_PrecompileForPreload);
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, requires_online_compilation, is_blocking, THREAD);
  }

  // return requested nmethod
  // We accept a higher level osr method
  if (osr_bci == InvocationEntryBci) {
    return method->code();
  }
  return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}


// ------------------------------------------------------------------
// CompileBroker::compilation_is_complete
//
// See if compilation of this method is already complete.
bool CompileBroker::compilation_is_complete(Method* method,
                                            int osr_bci,
                                            int comp_level,
                                            bool online_only,
                                            CompileTask::CompileReason compile_reason) {
  if (compile_reason == CompileTask::Reason_Precompile ||
      compile_reason == CompileTask::Reason_PrecompileForPreload) {
    return false; // FIXME: any restrictions?
  }
  bool is_osr = (osr_bci != standard_entry_bci);
  if (is_osr) {
    if (method->is_not_osr_compilable(comp_level)) {
      return true;
    } else {
      nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
      return (result != nullptr);
    }
  } else {
    if (method->is_not_compilable(comp_level)) {
      return true;
    } else {
      nmethod* result = method->code();
      if (result == nullptr) {
        return false;
      }
      if (online_only && result->is_scc()) {
        return false;
      }
      bool same_level = (comp_level == result->comp_level());
      if (result->has_clinit_barriers()) {
        return !same_level; // Allow replacing preloaded code with new code of the same level
      }
      return same_level;
    }
  }
}


/**
 * See if this compilation is already requested.
 *
 * Implementation note: there is only a single "is in queue" bit
 * for each method. This means that the check below is overly
 * conservative in the sense that an osr compilation in the queue
 * will block a normal compilation from entering the queue (and vice
 * versa).
 * This can be remedied by a full queue search to disambiguate
 * cases. If it is deemed profitable, this may be done.
 */
bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
  return method->queued_for_compilation();
}

// ------------------------------------------------------------------
// CompileBroker::compilation_is_prohibited
//
// See if this compilation is not allowed.
bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
  bool is_native = method->is_native();
  // Some compilers may not support the compilation of natives.
  AbstractCompiler *comp = compiler(comp_level);
  if (is_native && (!CICompileNatives || comp == nullptr)) {
    method->set_not_compilable_quietly("native methods not supported", comp_level);
    return true;
  }

  bool is_osr = (osr_bci != standard_entry_bci);
  // Some compilers may not support on stack replacement.
  if (is_osr && (!CICompileOSR || comp == nullptr)) {
    method->set_not_osr_compilable("OSR not supported", comp_level);
    return true;
  }

  // The method may be explicitly excluded by the user.
  double scale;
  if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) {
    bool quietly = CompilerOracle::be_quiet();
    if (PrintCompilation && !quietly) {
      // This does not happen quietly...
      ResourceMark rm;
      tty->print("### Excluding %s:%s",
                 method->is_native() ? "generation of native wrapper" : "compile",
                 (method->is_static() ? " static" : ""));
      method->print_short_name(tty);
      tty->cr();
    }
    method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly);
  }

  return false;
}

/**
 * Generate serialized IDs for compilation requests. If certain debugging flags are used
 * and the ID is not within the specified range, the method is not compiled and 0 is returned.
 * The function also allows generating separate compilation IDs for OSR compilations.
 */
int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
#ifdef ASSERT
  bool is_osr = (osr_bci != standard_entry_bci);
  int id;
  if (method->is_native()) {
    assert(!is_osr, "can't be osr");
    // Adapters, native wrappers and method handle intrinsics
    // should always be generated.
    return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1);
  } else if (CICountOSR && is_osr) {
    id = Atomic::add(&_osr_compilation_id, 1);
    if (CIStartOSR <= id && id < CIStopOSR) {
      return id;
    }
  } else {
    id = Atomic::add(&_compilation_id, 1);
    if (CIStart <= id && id < CIStop) {
      return id;
    }
  }

  // Method was not in the appropriate compilation range.
  method->set_not_compilable_quietly("Not in requested compile id range");
  return 0;
#else
  // CICountOSR is a develop flag and set to 'false' by default. In a product build,
  // only _compilation_id is incremented.
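  // (For reference, the id ranges above are only available in debug builds, e.g.
  //  -XX:CIStart=100 -XX:CIStop=200 compiles only requests whose ids fall in
  //  [100, 200); everything else is quietly marked not compilable.)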
1900 return Atomic::add(&_compilation_id, 1); 1901 #endif 1902 } 1903 1904 // ------------------------------------------------------------------ 1905 // CompileBroker::assign_compile_id_unlocked 1906 // 1907 // Public wrapper for assign_compile_id that acquires the needed locks 1908 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) { 1909 return assign_compile_id(method, osr_bci); 1910 } 1911 1912 // ------------------------------------------------------------------ 1913 // CompileBroker::create_compile_task 1914 // 1915 // Create a CompileTask object representing the current request for 1916 // compilation. Add this task to the queue. 1917 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue, 1918 int compile_id, 1919 const methodHandle& method, 1920 int osr_bci, 1921 int comp_level, 1922 const methodHandle& hot_method, 1923 int hot_count, 1924 SCCEntry* scc_entry, 1925 CompileTask::CompileReason compile_reason, 1926 bool requires_online_compilation, 1927 bool blocking) { 1928 CompileTask* new_task = CompileTask::allocate(); 1929 new_task->initialize(compile_id, method, osr_bci, comp_level, 1930 hot_method, hot_count, scc_entry, compile_reason, queue, 1931 requires_online_compilation, blocking); 1932 return new_task; 1933 } 1934 1935 #if INCLUDE_JVMCI 1936 // The number of milliseconds to wait before checking if 1937 // JVMCI compilation has made progress. 1938 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000; 1939 1940 // The number of JVMCI compilation progress checks that must fail 1941 // before unblocking a thread waiting for a blocking compilation. 1942 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10; 1943 1944 /** 1945 * Waits for a JVMCI compiler to complete a given task. This thread 1946 * waits until either the task completes or it sees no JVMCI compilation 1947 * progress for N consecutive milliseconds where N is 1948 * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * 1949 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS. 1950 * 1951 * @return true if this thread needs to free/recycle the task 1952 */ 1953 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) { 1954 assert(UseJVMCICompiler, "sanity"); 1955 MonitorLocker ml(thread, task->lock()); 1956 int progress_wait_attempts = 0; 1957 jint thread_jvmci_compilation_ticks = 0; 1958 jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks(); 1959 while (!task->is_complete() && !is_compilation_disabled_forever() && 1960 ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) { 1961 JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state(); 1962 1963 bool progress; 1964 if (jvmci_compile_state != nullptr) { 1965 jint ticks = jvmci_compile_state->compilation_ticks(); 1966 progress = (ticks - thread_jvmci_compilation_ticks) != 0; 1967 JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks); 1968 thread_jvmci_compilation_ticks = ticks; 1969 } else { 1970 // Still waiting on JVMCI compiler queue. This thread may be holding a lock 1971 // that all JVMCI compiler threads are blocked on. We use the global JVMCI 1972 // compilation ticks to determine whether JVMCI compilation 1973 // is still making progress through the JVMCI compiler queue. 
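      // (With the constants above, a waiter gives up after roughly
      //  JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS
      //  = 1000 ms * 10 = 10 seconds without any observed progress.)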
1974 jint ticks = jvmci->global_compilation_ticks(); 1975 progress = (ticks - global_jvmci_compilation_ticks) != 0; 1976 JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks); 1977 global_jvmci_compilation_ticks = ticks; 1978 } 1979 1980 if (!progress) { 1981 if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) { 1982 if (PrintCompilation) { 1983 task->print(tty, "wait for blocking compilation timed out"); 1984 } 1985 JVMCI_event_1("waiting on compilation %d timed out", task->compile_id()); 1986 break; 1987 } 1988 } else { 1989 progress_wait_attempts = 0; 1990 } 1991 } 1992 task->clear_waiter(); 1993 return task->is_complete(); 1994 } 1995 #endif 1996 1997 /** 1998 * Wait for the compilation task to complete. 1999 */ 2000 void CompileBroker::wait_for_completion(CompileTask* task) { 2001 if (CIPrintCompileQueue) { 2002 ttyLocker ttyl; 2003 tty->print_cr("BLOCKING FOR COMPILE"); 2004 } 2005 2006 assert(task->is_blocking(), "can only wait on blocking task"); 2007 2008 JavaThread* thread = JavaThread::current(); 2009 2010 methodHandle method(thread, task->method()); 2011 bool free_task; 2012 #if INCLUDE_JVMCI 2013 AbstractCompiler* comp = compiler(task->comp_level()); 2014 if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) { 2015 // It may return before compilation is completed. 2016 // Note that libjvmci should not pre-emptively unblock 2017 // a thread waiting for a compilation as it does not call 2018 // Java code and so is not deadlock prone like jarjvmci. 2019 free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread); 2020 } else 2021 #endif 2022 { 2023 MonitorLocker ml(thread, task->lock()); 2024 free_task = true; 2025 task->inc_waiting_for_completion(); 2026 while (!task->is_complete() && !is_compilation_disabled_forever()) { 2027 ml.wait(); 2028 } 2029 task->dec_waiting_for_completion(); 2030 } 2031 2032 if (free_task) { 2033 if (is_compilation_disabled_forever()) { 2034 CompileTask::free(task); 2035 return; 2036 } 2037 2038 // It is harmless to check this status without the lock, because 2039 // completion is a stable property (until the task object is recycled). 2040 assert(task->is_complete(), "Compilation should have completed"); 2041 2042 // By convention, the waiter is responsible for recycling a 2043 // blocking CompileTask. Since there is only one waiter ever 2044 // waiting on a CompileTask, we know that no one else will 2045 // be using this CompileTask; we can free it. 2046 CompileTask::free(task); 2047 } 2048 } 2049 2050 /** 2051 * Initialize compiler thread(s) + compiler object(s). The postcondition 2052 * of this function is that the compiler runtimes are initialized and that 2053 * compiler threads can start compiling. 
2054 */ 2055 bool CompileBroker::init_compiler_runtime() { 2056 CompilerThread* thread = CompilerThread::current(); 2057 AbstractCompiler* comp = thread->compiler(); 2058 // Final sanity check - the compiler object must exist 2059 guarantee(comp != nullptr, "Compiler object must exist"); 2060 2061 { 2062 // Must switch to native to allocate ci_env 2063 ThreadToNativeFromVM ttn(thread); 2064 ciEnv ci_env((CompileTask*)nullptr); 2065 // Cache Jvmti state 2066 ci_env.cache_jvmti_state(); 2067 // Cache DTrace flags 2068 ci_env.cache_dtrace_flags(); 2069 2070 // Switch back to VM state to do compiler initialization 2071 ThreadInVMfromNative tv(thread); 2072 2073 comp->initialize(); 2074 } 2075 2076 if (comp->is_failed()) { 2077 disable_compilation_forever(); 2078 // If compiler initialization failed, no compiler thread that is specific to a 2079 // particular compiler runtime will ever start to compile methods. 2080 shutdown_compiler_runtime(comp, thread); 2081 return false; 2082 } 2083 2084 // C1 specific check 2085 if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) { 2086 warning("Initialization of %s thread failed (no space to run compilers)", thread->name()); 2087 return false; 2088 } 2089 2090 return true; 2091 } 2092 2093 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { 2094 BufferBlob* blob = thread->get_buffer_blob(); 2095 if (blob != nullptr) { 2096 blob->purge(); 2097 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2098 CodeCache::free(blob); 2099 } 2100 } 2101 2102 /** 2103 * If C1 and/or C2 initialization failed, we shut down all compilation. 2104 * We do this to keep things simple. This can be changed if it ever turns 2105 * out to be a problem. 2106 */ 2107 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { 2108 free_buffer_blob_if_allocated(thread); 2109 2110 log_info(compilation)("shutdown_compiler_runtime: " INTPTR_FORMAT, p2i(thread)); 2111 2112 if (comp->should_perform_shutdown()) { 2113 // There are two reasons for shutting down the compiler 2114 // 1) compiler runtime initialization failed 2115 // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing 2116 warning("%s initialization failed. Shutting down all compilers", comp->name()); 2117 2118 // Only one thread per compiler runtime object enters here 2119 // Set state to shut down 2120 comp->set_shut_down(); 2121 2122 // Delete all queued compilation tasks to make compiler threads exit faster. 2123 if (_c1_compile_queue != nullptr) { 2124 _c1_compile_queue->free_all(); 2125 } 2126 2127 if (_c2_compile_queue != nullptr) { 2128 _c2_compile_queue->free_all(); 2129 } 2130 2131 if (_c3_compile_queue != nullptr) { 2132 _c3_compile_queue->free_all(); 2133 } 2134 2135 // Set flags so that we continue execution with using interpreter only. 2136 UseCompiler = false; 2137 UseInterpreter = true; 2138 2139 // We could delete compiler runtimes also. However, there are references to 2140 // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then 2141 // fail. This can be done later if necessary. 2142 } 2143 } 2144 2145 /** 2146 * Helper function to create new or reuse old CompileLog. 2147 */ 2148 CompileLog* CompileBroker::get_log(CompilerThread* ct) { 2149 if (!LogCompilation) return nullptr; 2150 2151 AbstractCompiler *compiler = ct->compiler(); 2152 bool jvmci = JVMCI_ONLY( compiler->is_jvmci() ||) false; 2153 bool c1 = compiler->is_c1(); 2154 jobject* compiler_objects = c1 ? 
_compiler1_objects : (_c3_count == 0 ? _compiler2_objects : (jvmci ? _compiler2_objects : _compiler3_objects)); 2155 assert(compiler_objects != nullptr, "must be initialized at this point"); 2156 CompileLog** logs = c1 ? _compiler1_logs : (_c3_count == 0 ? _compiler2_logs : (jvmci ? _compiler2_logs : _compiler3_logs)); 2157 assert(logs != nullptr, "must be initialized at this point"); 2158 int count = c1 ? _c1_count : (_c3_count == 0 ? _c2_count : (jvmci ? _c2_count : _c3_count)); 2159 2160 if (ct->queue() == _sc1_compile_queue || ct->queue() == _sc2_compile_queue) { 2161 compiler_objects = _sc_objects; 2162 logs = _sc_logs; 2163 count = _sc_count; 2164 } 2165 // Find Compiler number by its threadObj. 2166 oop compiler_obj = ct->threadObj(); 2167 int compiler_number = 0; 2168 bool found = false; 2169 for (; compiler_number < count; compiler_number++) { 2170 if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) { 2171 found = true; 2172 break; 2173 } 2174 } 2175 assert(found, "Compiler must exist at this point"); 2176 2177 // Determine pointer for this thread's log. 2178 CompileLog** log_ptr = &logs[compiler_number]; 2179 2180 // Return old one if it exists. 2181 CompileLog* log = *log_ptr; 2182 if (log != nullptr) { 2183 ct->init_log(log); 2184 return log; 2185 } 2186 2187 // Create a new one and remember it. 2188 init_compiler_thread_log(); 2189 log = ct->log(); 2190 *log_ptr = log; 2191 return log; 2192 } 2193 2194 // ------------------------------------------------------------------ 2195 // CompileBroker::compiler_thread_loop 2196 // 2197 // The main loop run by a CompilerThread. 2198 void CompileBroker::compiler_thread_loop() { 2199 CompilerThread* thread = CompilerThread::current(); 2200 CompileQueue* queue = thread->queue(); 2201 // For the thread that initializes the ciObjectFactory 2202 // this resource mark holds all the shared objects 2203 ResourceMark rm; 2204 2205 // First thread to get here will initialize the compiler interface 2206 2207 { 2208 ASSERT_IN_VM; 2209 MutexLocker only_one (thread, CompileThread_lock); 2210 if (!ciObjectFactory::is_initialized()) { 2211 ciObjectFactory::initialize(); 2212 } 2213 } 2214 2215 // Open a log. 2216 CompileLog* log = get_log(thread); 2217 if (log != nullptr) { 2218 log->begin_elem("start_compile_thread name='%s' thread='%zu' process='%d'", 2219 thread->name(), 2220 os::current_thread_id(), 2221 os::current_process_id()); 2222 log->stamp(); 2223 log->end_elem(); 2224 } 2225 2226 // If compiler thread/runtime initialization fails, exit the compiler thread 2227 if (!init_compiler_runtime()) { 2228 return; 2229 } 2230 2231 thread->start_idle_timer(); 2232 2233 // Poll for new compilation tasks as long as the JVM runs. Compilation 2234 // should only be disabled if something went wrong while initializing the 2235 // compiler runtimes. This, in turn, should not happen. The only known case 2236 // when compiler runtime initialization fails is if there is not enough free 2237 // space in the code cache to generate the necessary stubs, etc. 2238 while (!is_compilation_disabled_forever()) { 2239 // We need this HandleMark to avoid leaking VM handles. 2240 HandleMark hm(thread); 2241 2242 RecompilationPolicy::recompilation_step(RecompilationWorkUnitSize, thread); 2243 2244 CompileTask* task = queue->get(thread); 2245 2246 if (task == nullptr) { 2247 if (UseDynamicNumberOfCompilerThreads) { 2248 // Access compiler_count under lock to enforce consistency. 
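        // (With -XX:+UseDynamicNumberOfCompilerThreads, which is on by default,
        //  idle compiler threads beyond the required minimum are torn down here
        //  and may later be re-created by possibly_add_compiler_threads().)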
2249 MutexLocker only_one(CompileThread_lock); 2250 if (can_remove(thread, true)) { 2251 if (trace_compiler_threads()) { 2252 ResourceMark rm; 2253 stringStream msg; 2254 msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time", 2255 thread->name(), thread->idle_time_millis()); 2256 print_compiler_threads(msg); 2257 } 2258 2259 // Notify compiler that the compiler thread is about to stop 2260 thread->compiler()->stopping_compiler_thread(thread); 2261 2262 free_buffer_blob_if_allocated(thread); 2263 return; // Stop this thread. 2264 } 2265 } 2266 } else { 2267 // Assign the task to the current thread. Mark this compilation 2268 // thread as active for the profiler. 2269 // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition 2270 // occurs after fetching the compile task off the queue. 2271 CompileTaskWrapper ctw(task); 2272 methodHandle method(thread, task->method()); 2273 2274 // Never compile a method if breakpoints are present in it 2275 if (method()->number_of_breakpoints() == 0) { 2276 // Compile the method. 2277 if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { 2278 invoke_compiler_on_method(task); 2279 thread->start_idle_timer(); 2280 } else { 2281 // After compilation is disabled, remove remaining methods from queue 2282 method->clear_queued_for_compilation(); 2283 method->set_pending_queue_processed(false); 2284 task->set_failure_reason("compilation is disabled"); 2285 } 2286 } else { 2287 task->set_failure_reason("breakpoints are present"); 2288 } 2289 2290 if (UseDynamicNumberOfCompilerThreads) { 2291 possibly_add_compiler_threads(thread); 2292 assert(!thread->has_pending_exception(), "should have been handled"); 2293 } 2294 } 2295 } 2296 2297 // Shut down compiler runtime 2298 shutdown_compiler_runtime(thread->compiler(), thread); 2299 } 2300 2301 // ------------------------------------------------------------------ 2302 // CompileBroker::init_compiler_thread_log 2303 // 2304 // Set up state required by +LogCompilation. 2305 void CompileBroker::init_compiler_thread_log() { 2306 CompilerThread* thread = CompilerThread::current(); 2307 char file_name[4*K]; 2308 FILE* fp = nullptr; 2309 intx thread_id = os::current_thread_id(); 2310 for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) { 2311 const char* dir = (try_temp_dir ? 
os::get_temp_directory() : nullptr); 2312 if (dir == nullptr) { 2313 jio_snprintf(file_name, sizeof(file_name), "hs_c%zu_pid%u.log", 2314 thread_id, os::current_process_id()); 2315 } else { 2316 jio_snprintf(file_name, sizeof(file_name), 2317 "%s%shs_c%zu_pid%u.log", dir, 2318 os::file_separator(), thread_id, os::current_process_id()); 2319 } 2320 2321 fp = os::fopen(file_name, "wt"); 2322 if (fp != nullptr) { 2323 if (LogCompilation && Verbose) { 2324 tty->print_cr("Opening compilation log %s", file_name); 2325 } 2326 CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id); 2327 if (log == nullptr) { 2328 fclose(fp); 2329 return; 2330 } 2331 thread->init_log(log); 2332 2333 if (xtty != nullptr) { 2334 ttyLocker ttyl; 2335 // Record any per thread log files 2336 xtty->elem("thread_logfile thread='%zd' filename='%s'", thread_id, file_name); 2337 } 2338 return; 2339 } 2340 } 2341 warning("Cannot open log file: %s", file_name); 2342 } 2343 2344 void CompileBroker::log_metaspace_failure() { 2345 const char* message = "some methods may not be compiled because metaspace " 2346 "is out of memory"; 2347 if (CompilationLog::log() != nullptr) { 2348 CompilationLog::log()->log_metaspace_failure(message); 2349 } 2350 if (PrintCompilation) { 2351 tty->print_cr("COMPILE PROFILING SKIPPED: %s", message); 2352 } 2353 } 2354 2355 2356 // ------------------------------------------------------------------ 2357 // CompileBroker::set_should_block 2358 // 2359 // Set _should_block. 2360 // Call this from the VM, with Threads_lock held and a safepoint requested. 2361 void CompileBroker::set_should_block() { 2362 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); 2363 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already"); 2364 #ifndef PRODUCT 2365 if (PrintCompilation && (Verbose || WizardMode)) 2366 tty->print_cr("notifying compiler thread pool to block"); 2367 #endif 2368 _should_block = true; 2369 } 2370 2371 // ------------------------------------------------------------------ 2372 // CompileBroker::maybe_block 2373 // 2374 // Call this from the compiler at convenient points, to poll for _should_block. 2375 void CompileBroker::maybe_block() { 2376 if (_should_block) { 2377 #ifndef PRODUCT 2378 if (PrintCompilation && (Verbose || WizardMode)) 2379 tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current())); 2380 #endif 2381 // If we are executing a task during the request to block, report the task 2382 // before disappearing. 2383 CompilerThread* thread = CompilerThread::current(); 2384 if (thread != nullptr) { 2385 CompileTask* task = thread->task(); 2386 if (task != nullptr) { 2387 if (PrintCompilation) { 2388 task->print(tty, "blocked"); 2389 } 2390 task->print_ul("blocked"); 2391 } 2392 } 2393 // Go to VM state and block for final VM shutdown safepoint. 
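    // (The state transition below participates in the pending safepoint; since
    //  the VM is shutting down, the thread is expected to park here for good,
    //  which is what the assert documents.)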
2394 ThreadInVMfromNative tivfn(JavaThread::current()); 2395 assert(false, "Should never unblock from TIVNM entry"); 2396 } 2397 } 2398 2399 // wrapper for CodeCache::print_summary() 2400 static void codecache_print(bool detailed) 2401 { 2402 stringStream s; 2403 // Dump code cache into a buffer before locking the tty, 2404 { 2405 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2406 CodeCache::print_summary(&s, detailed); 2407 } 2408 ttyLocker ttyl; 2409 tty->print("%s", s.freeze()); 2410 } 2411 2412 // wrapper for CodeCache::print_summary() using outputStream 2413 static void codecache_print(outputStream* out, bool detailed) { 2414 stringStream s; 2415 2416 // Dump code cache into a buffer 2417 { 2418 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2419 CodeCache::print_summary(&s, detailed); 2420 } 2421 2422 char* remaining_log = s.as_string(); 2423 while (*remaining_log != '\0') { 2424 char* eol = strchr(remaining_log, '\n'); 2425 if (eol == nullptr) { 2426 out->print_cr("%s", remaining_log); 2427 remaining_log = remaining_log + strlen(remaining_log); 2428 } else { 2429 *eol = '\0'; 2430 out->print_cr("%s", remaining_log); 2431 remaining_log = eol + 1; 2432 } 2433 } 2434 } 2435 2436 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, 2437 int compilable, const char* failure_reason) { 2438 if (!AbortVMOnCompilationFailure) { 2439 return; 2440 } 2441 if (compilable == ciEnv::MethodCompilable_not_at_tier) { 2442 fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); 2443 } 2444 if (compilable == ciEnv::MethodCompilable_never) { 2445 fatal("Never compilable: %s", failure_reason); 2446 } 2447 } 2448 2449 static void post_compilation_event(EventCompilation& event, CompileTask* task) { 2450 assert(task != nullptr, "invariant"); 2451 CompilerEvent::CompilationEvent::post(event, 2452 task->compile_id(), 2453 task->compiler()->type(), 2454 task->method(), 2455 task->comp_level(), 2456 task->is_success(), 2457 task->osr_bci() != CompileBroker::standard_entry_bci, 2458 task->nm_total_size(), 2459 task->num_inlined_bytecodes(), 2460 task->arena_bytes()); 2461 } 2462 2463 int DirectivesStack::_depth = 0; 2464 CompilerDirectives* DirectivesStack::_top = nullptr; 2465 CompilerDirectives* DirectivesStack::_bottom = nullptr; 2466 2467 // Acquires Compilation_lock and waits for it to be notified 2468 // as long as WhiteBox::compilation_locked is true. 2469 static void whitebox_lock_compilation() { 2470 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag); 2471 while (WhiteBox::compilation_locked) { 2472 locker.wait(); 2473 } 2474 } 2475 2476 // ------------------------------------------------------------------ 2477 // CompileBroker::invoke_compiler_on_method 2478 // 2479 // Compile a method. 2480 // 2481 void CompileBroker::invoke_compiler_on_method(CompileTask* task) { 2482 task->print_ul(); 2483 elapsedTimer time; 2484 2485 DirectiveSet* directive = task->directive(); 2486 2487 CompilerThread* thread = CompilerThread::current(); 2488 ResourceMark rm(thread); 2489 2490 if (CompilationLog::log() != nullptr) { 2491 CompilationLog::log()->log_compile(thread, task); 2492 } 2493 2494 // Common flags. 
2495 int compile_id = task->compile_id(); 2496 int osr_bci = task->osr_bci(); 2497 bool is_osr = (osr_bci != standard_entry_bci); 2498 bool should_log = (thread->log() != nullptr); 2499 bool should_break = false; 2500 bool should_print_compilation = PrintCompilation || directive->PrintCompilationOption; 2501 const int task_level = task->comp_level(); 2502 AbstractCompiler* comp = task->compiler(); 2503 { 2504 // create the handle inside it's own block so it can't 2505 // accidentally be referenced once the thread transitions to 2506 // native. The NoHandleMark before the transition should catch 2507 // any cases where this occurs in the future. 2508 methodHandle method(thread, task->method()); 2509 2510 assert(!method->is_native(), "no longer compile natives"); 2511 2512 // Update compile information when using perfdata. 2513 if (UsePerfData) { 2514 update_compile_perf_data(thread, method, is_osr); 2515 } 2516 2517 DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); 2518 } 2519 2520 should_break = directive->BreakAtCompileOption || task->check_break_at_flags(); 2521 if (should_log && !directive->LogOption) { 2522 should_log = false; 2523 } 2524 2525 // Allocate a new set of JNI handles. 2526 JNIHandleMark jhm(thread); 2527 Method* target_handle = task->method(); 2528 int compilable = ciEnv::MethodCompilable; 2529 const char* failure_reason = nullptr; 2530 bool failure_reason_on_C_heap = false; 2531 const char* retry_message = nullptr; 2532 2533 #if INCLUDE_JVMCI 2534 if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) { 2535 JVMCICompiler* jvmci = (JVMCICompiler*) comp; 2536 2537 TraceTime t1("compilation", &time); 2538 EventCompilation event; 2539 JVMCICompileState compile_state(task, jvmci); 2540 JVMCIRuntime *runtime = nullptr; 2541 2542 if (JVMCI::in_shutdown()) { 2543 failure_reason = "in JVMCI shutdown"; 2544 retry_message = "not retryable"; 2545 compilable = ciEnv::MethodCompilable_never; 2546 } else if (compile_state.target_method_is_old()) { 2547 // Skip redefined methods 2548 failure_reason = "redefined method"; 2549 retry_message = "not retryable"; 2550 compilable = ciEnv::MethodCompilable_never; 2551 } else { 2552 JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__); 2553 if (env.init_error() != JNI_OK) { 2554 const char* msg = env.init_error_msg(); 2555 failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)", 2556 env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI); 2557 bool reason_on_C_heap = true; 2558 // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it 2559 // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime. 
2560 bool retryable = env.init_error() == JNI_ENOMEM; 2561 compile_state.set_failure(retryable, failure_reason, reason_on_C_heap); 2562 } 2563 if (failure_reason == nullptr) { 2564 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2565 // Must switch to native to block 2566 ThreadToNativeFromVM ttn(thread); 2567 whitebox_lock_compilation(); 2568 } 2569 methodHandle method(thread, target_handle); 2570 runtime = env.runtime(); 2571 runtime->compile_method(&env, jvmci, method, osr_bci); 2572 2573 failure_reason = compile_state.failure_reason(); 2574 failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); 2575 if (!compile_state.retryable()) { 2576 retry_message = "not retryable"; 2577 compilable = ciEnv::MethodCompilable_not_at_tier; 2578 } 2579 if (!task->is_success()) { 2580 assert(failure_reason != nullptr, "must specify failure_reason"); 2581 } 2582 } 2583 } 2584 if (!task->is_success() && !JVMCI::in_shutdown()) { 2585 handle_compile_error(thread, task, nullptr, compilable, failure_reason); 2586 } 2587 if (event.should_commit()) { 2588 post_compilation_event(event, task); 2589 } 2590 2591 if (runtime != nullptr) { 2592 runtime->post_compile(thread); 2593 } 2594 } else 2595 #endif // INCLUDE_JVMCI 2596 { 2597 NoHandleMark nhm; 2598 ThreadToNativeFromVM ttn(thread); 2599 2600 ciEnv ci_env(task); 2601 if (should_break) { 2602 ci_env.set_break_at_compile(true); 2603 } 2604 if (should_log) { 2605 ci_env.set_log(thread->log()); 2606 } 2607 assert(thread->env() == &ci_env, "set by ci_env"); 2608 // The thread-env() field is cleared in ~CompileTaskWrapper. 2609 2610 // Cache Jvmti state 2611 bool method_is_old = ci_env.cache_jvmti_state(); 2612 2613 // Skip redefined methods 2614 if (method_is_old) { 2615 ci_env.record_method_not_compilable("redefined method", true); 2616 } 2617 2618 // Cache DTrace flags 2619 ci_env.cache_dtrace_flags(); 2620 2621 ciMethod* target = ci_env.get_method_from_handle(target_handle); 2622 2623 TraceTime t1("compilation", &time); 2624 EventCompilation event; 2625 2626 bool install_code = true; 2627 if (comp == nullptr) { 2628 ci_env.record_method_not_compilable("no compiler"); 2629 } else if (!ci_env.failing()) { 2630 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2631 whitebox_lock_compilation(); 2632 } 2633 if (StoreCachedCode && task->is_precompiled()) { 2634 install_code = false; // not suitable in the current context 2635 } 2636 comp->compile_method(&ci_env, target, osr_bci, install_code, directive); 2637 2638 /* Repeat compilation without installing code for profiling purposes */ 2639 int repeat_compilation_count = directive->RepeatCompilationOption; 2640 while (repeat_compilation_count > 0) { 2641 ResourceMark rm(thread); 2642 task->print_ul("NO CODE INSTALLED"); 2643 comp->compile_method(&ci_env, target, osr_bci, false, directive); 2644 repeat_compilation_count--; 2645 } 2646 } 2647 2648 DirectivesStack::release(directive); 2649 2650 if (!ci_env.failing() && !task->is_success() && install_code) { 2651 assert(ci_env.failure_reason() != nullptr, "expect failure reason"); 2652 assert(false, "compiler should always document failure: %s", ci_env.failure_reason()); 2653 // The compiler elected, without comment, not to register a result. 2654 // Do not attempt further compilations of this method. 
2655 ci_env.record_method_not_compilable("compile failed"); 2656 } 2657 2658 // Copy this bit to the enclosing block: 2659 compilable = ci_env.compilable(); 2660 2661 if (ci_env.failing()) { 2662 // Duplicate the failure reason string, so that it outlives ciEnv 2663 failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler); 2664 failure_reason_on_C_heap = true; 2665 retry_message = ci_env.retry_message(); 2666 ci_env.report_failure(failure_reason); 2667 } 2668 2669 if (ci_env.failing()) { 2670 handle_compile_error(thread, task, &ci_env, compilable, failure_reason); 2671 } 2672 if (event.should_commit()) { 2673 post_compilation_event(event, task); 2674 } 2675 } 2676 2677 if (failure_reason != nullptr) { 2678 task->set_failure_reason(failure_reason, failure_reason_on_C_heap); 2679 if (CompilationLog::log() != nullptr) { 2680 CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message); 2681 } 2682 if (PrintCompilation) { 2683 FormatBufferResource msg = retry_message != nullptr ? 2684 FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) : 2685 FormatBufferResource("COMPILE SKIPPED: %s", failure_reason); 2686 task->print(tty, msg); 2687 } 2688 } 2689 2690 task->mark_finished(os::elapsed_counter()); 2691 2692 methodHandle method(thread, task->method()); 2693 2694 DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); 2695 2696 collect_statistics(thread, time, task); 2697 2698 if (PrintCompilation && PrintCompilation2) { 2699 tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp 2700 tty->print("%4d ", compile_id); // print compilation number 2701 tty->print("%s ", (is_osr ? "%" : (task->is_scc() ? "A" : " "))); 2702 if (task->is_success()) { 2703 tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size()); 2704 } 2705 tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); 2706 } 2707 2708 Log(compilation, codecache) log; 2709 if (log.is_debug()) { 2710 LogStream ls(log.debug()); 2711 codecache_print(&ls, /* detailed= */ false); 2712 } 2713 if (PrintCodeCacheOnCompilation) { 2714 codecache_print(/* detailed= */ false); 2715 } 2716 // Disable compilation, if required. 2717 switch (compilable) { 2718 case ciEnv::MethodCompilable_never: 2719 if (is_osr) 2720 method->set_not_osr_compilable_quietly("MethodCompilable_never"); 2721 else 2722 method->set_not_compilable_quietly("MethodCompilable_never"); 2723 break; 2724 case ciEnv::MethodCompilable_not_at_tier: 2725 if (is_osr) 2726 method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2727 else 2728 method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2729 break; 2730 } 2731 2732 // Note that the queued_for_compilation bits are cleared without 2733 // protection of a mutex. [They were set by the requester thread, 2734 // when adding the task to the compile queue -- at which time the 2735 // compile queue lock was held. Subsequently, we acquired the compile 2736 // queue lock to get this task off the compile queue; thus (to belabour 2737 // the point somewhat) our clearing of the bits must be occurring 2738 // only after the setting of the bits. See also 14012000 above. 2739 method->clear_queued_for_compilation(); 2740 method->set_pending_queue_processed(false); 2741 2742 if (should_print_compilation) { 2743 ResourceMark rm; 2744 task->print_tty(); 2745 } 2746 } 2747 2748 /** 2749 * The CodeCache is full. 
Print warning and disable compilation. 2750 * Schedule code cache cleaning so compilation can continue later. 2751 * This function needs to be called only from CodeCache::allocate(), 2752 * since we currently handle a full code cache uniformly. 2753 */ 2754 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) { 2755 UseInterpreter = true; 2756 if (UseCompiler || AlwaysCompileLoopMethods ) { 2757 if (xtty != nullptr) { 2758 stringStream s; 2759 // Dump code cache state into a buffer before locking the tty, 2760 // because log_state() will use locks causing lock conflicts. 2761 CodeCache::log_state(&s); 2762 // Lock to prevent tearing 2763 ttyLocker ttyl; 2764 xtty->begin_elem("code_cache_full"); 2765 xtty->print("%s", s.freeze()); 2766 xtty->stamp(); 2767 xtty->end_elem(); 2768 } 2769 2770 #ifndef PRODUCT 2771 if (ExitOnFullCodeCache) { 2772 codecache_print(/* detailed= */ true); 2773 before_exit(JavaThread::current()); 2774 exit_globals(); // will delete tty 2775 vm_direct_exit(1); 2776 } 2777 #endif 2778 if (UseCodeCacheFlushing) { 2779 // Since code cache is full, immediately stop new compiles 2780 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { 2781 log_info(codecache)("Code cache is full - disabling compilation"); 2782 } 2783 } else { 2784 disable_compilation_forever(); 2785 } 2786 2787 CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning()); 2788 } 2789 } 2790 2791 // ------------------------------------------------------------------ 2792 // CompileBroker::update_compile_perf_data 2793 // 2794 // Record this compilation for debugging purposes. 2795 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) { 2796 ResourceMark rm; 2797 char* method_name = method->name()->as_C_string(); 2798 char current_method[CompilerCounters::cmname_buffer_length]; 2799 size_t maxLen = CompilerCounters::cmname_buffer_length; 2800 2801 const char* class_name = method->method_holder()->name()->as_C_string(); 2802 2803 size_t s1len = strlen(class_name); 2804 size_t s2len = strlen(method_name); 2805 2806 // check if we need to truncate the string 2807 if (s1len + s2len + 2 > maxLen) { 2808 2809 // the strategy is to lop off the leading characters of the 2810 // class name and the trailing characters of the method name. 2811 2812 if (s2len + 2 > maxLen) { 2813 // lop of the entire class name string, let snprintf handle 2814 // truncation of the method name. 2815 class_name += s1len; // null string 2816 } 2817 else { 2818 // lop off the extra characters from the front of the class name 2819 class_name += ((s1len + s2len + 2) - maxLen); 2820 } 2821 } 2822 2823 jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name); 2824 2825 int last_compile_type = normal_compile; 2826 if (CICountOSR && is_osr) { 2827 last_compile_type = osr_compile; 2828 } else if (CICountNative && method->is_native()) { 2829 last_compile_type = native_compile; 2830 } 2831 2832 CompilerCounters* counters = thread->counters(); 2833 counters->set_current_method(current_method); 2834 counters->set_compile_type((jlong) last_compile_type); 2835 } 2836 2837 // ------------------------------------------------------------------ 2838 // CompileBroker::collect_statistics 2839 // 2840 // Collect statistics about the compilation. 
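// Note: the counters and timers below are updated under CompileStatistics_lock,
// which collect_statistics() acquires before touching them.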
2841 2842 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) { 2843 bool success = task->is_success(); 2844 methodHandle method (thread, task->method()); 2845 int compile_id = task->compile_id(); 2846 bool is_osr = (task->osr_bci() != standard_entry_bci); 2847 const int comp_level = task->comp_level(); 2848 CompilerCounters* counters = thread->counters(); 2849 2850 MutexLocker locker(CompileStatistics_lock); 2851 2852 // _perf variables are production performance counters which are 2853 // updated regardless of the setting of the CITime and CITimeEach flags 2854 // 2855 2856 // account all time, including bailouts and failures in this counter; 2857 // C1 and C2 counters are counting both successful and unsuccessful compiles 2858 _t_total_compilation.add(&time); 2859 2860 // Update compilation times. Used by the implementation of JFR CompilerStatistics 2861 // and java.lang.management.CompilationMXBean. 2862 _perf_total_compilation->inc(time.ticks()); 2863 _peak_compilation_time = MAX2(time.milliseconds(), _peak_compilation_time); 2864 2865 if (!success) { 2866 _total_bailout_count++; 2867 if (UsePerfData) { 2868 _perf_last_failed_method->set_value(counters->current_method()); 2869 _perf_last_failed_type->set_value(counters->compile_type()); 2870 _perf_total_bailout_count->inc(); 2871 } 2872 _t_bailedout_compilation.add(&time); 2873 2874 if (CITime || log_is_enabled(Info, init)) { 2875 CompilerStatistics* stats = nullptr; 2876 if (task->is_scc()) { 2877 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2878 stats = &_scc_stats_per_level[level]; 2879 } else { 2880 stats = &_stats_per_level[comp_level-1]; 2881 } 2882 stats->_bailout.update(time, 0); 2883 } 2884 } else if (!task->is_success()) { 2885 if (UsePerfData) { 2886 _perf_last_invalidated_method->set_value(counters->current_method()); 2887 _perf_last_invalidated_type->set_value(counters->compile_type()); 2888 _perf_total_invalidated_count->inc(); 2889 } 2890 _total_invalidated_count++; 2891 _t_invalidated_compilation.add(&time); 2892 2893 if (CITime || log_is_enabled(Info, init)) { 2894 CompilerStatistics* stats = nullptr; 2895 if (task->is_scc()) { 2896 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2897 stats = &_scc_stats_per_level[level]; 2898 } else { 2899 stats = &_stats_per_level[comp_level-1]; 2900 } 2901 stats->_invalidated.update(time, 0); 2902 } 2903 } else { 2904 // Compilation succeeded 2905 if (CITime || log_is_enabled(Info, init)) { 2906 int bytes_compiled = method->code_size() + task->num_inlined_bytecodes(); 2907 if (is_osr) { 2908 _t_osr_compilation.add(&time); 2909 _sum_osr_bytes_compiled += bytes_compiled; 2910 } else { 2911 _t_standard_compilation.add(&time); 2912 _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); 2913 } 2914 2915 // Collect statistic per compilation level 2916 if (task->is_scc()) { 2917 _scc_stats._standard.update(time, bytes_compiled); 2918 _scc_stats._nmethods_size += task->nm_total_size(); 2919 _scc_stats._nmethods_code_size += task->nm_insts_size(); 2920 int level = task->preload() ? 
CompLevel_full_optimization : (comp_level - 1); 2921 CompilerStatistics* stats = &_scc_stats_per_level[level]; 2922 stats->_standard.update(time, bytes_compiled); 2923 stats->_nmethods_size += task->nm_total_size(); 2924 stats->_nmethods_code_size += task->nm_insts_size(); 2925 } else if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) { 2926 CompilerStatistics* stats = &_stats_per_level[comp_level-1]; 2927 if (is_osr) { 2928 stats->_osr.update(time, bytes_compiled); 2929 } else { 2930 stats->_standard.update(time, bytes_compiled); 2931 } 2932 stats->_nmethods_size += task->nm_total_size(); 2933 stats->_nmethods_code_size += task->nm_insts_size(); 2934 } else { 2935 assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); 2936 } 2937 2938 // Collect statistic per compiler 2939 AbstractCompiler* comp = task->compiler(); 2940 if (comp && !task->is_scc()) { 2941 CompilerStatistics* stats = comp->stats(); 2942 if (is_osr) { 2943 stats->_osr.update(time, bytes_compiled); 2944 } else { 2945 stats->_standard.update(time, bytes_compiled); 2946 } 2947 stats->_nmethods_size += task->nm_total_size(); 2948 stats->_nmethods_code_size += task->nm_insts_size(); 2949 } else if (!task->is_scc()) { // if (!comp) 2950 assert(false, "Compiler object must exist"); 2951 } 2952 } 2953 2954 if (UsePerfData) { 2955 // save the name of the last method compiled 2956 _perf_last_method->set_value(counters->current_method()); 2957 _perf_last_compile_type->set_value(counters->compile_type()); 2958 _perf_last_compile_size->set_value(method->code_size() + 2959 task->num_inlined_bytecodes()); 2960 if (is_osr) { 2961 _perf_osr_compilation->inc(time.ticks()); 2962 _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2963 } else { 2964 _perf_standard_compilation->inc(time.ticks()); 2965 _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2966 } 2967 } 2968 2969 if (CITimeEach) { 2970 double compile_time = time.seconds(); 2971 double bytes_per_sec = compile_time == 0.0 ? 0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time; 2972 tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)", 2973 compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); 2974 } 2975 2976 // Collect counts of successful compilations 2977 _sum_nmethod_size += task->nm_total_size(); 2978 _sum_nmethod_code_size += task->nm_insts_size(); 2979 _total_compile_count++; 2980 2981 if (UsePerfData) { 2982 _perf_sum_nmethod_size->inc( task->nm_total_size()); 2983 _perf_sum_nmethod_code_size->inc(task->nm_insts_size()); 2984 _perf_total_compile_count->inc(); 2985 } 2986 2987 if (is_osr) { 2988 if (UsePerfData) _perf_total_osr_compile_count->inc(); 2989 _total_osr_compile_count++; 2990 } else { 2991 if (UsePerfData) _perf_total_standard_compile_count->inc(); 2992 _total_standard_compile_count++; 2993 } 2994 } 2995 // set the current method for the thread to null 2996 if (UsePerfData) counters->set_current_method(""); 2997 } 2998 2999 const char* CompileBroker::compiler_name(int comp_level) { 3000 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 3001 if (comp == nullptr) { 3002 return "no compiler"; 3003 } else { 3004 return (comp->name()); 3005 } 3006 } 3007 3008 jlong CompileBroker::total_compilation_ticks() { 3009 return _perf_total_compilation != nullptr ? 
_perf_total_compilation->get_value() : 0; 3010 } 3011 3012 void CompileBroker::log_not_entrant(nmethod* nm) { 3013 _total_not_entrant_count++; 3014 if (CITime || log_is_enabled(Info, init)) { 3015 CompilerStatistics* stats = nullptr; 3016 int level = nm->comp_level(); 3017 if (nm->is_scc()) { 3018 if (nm->preloaded()) { 3019 assert(level == CompLevel_full_optimization, "%d", level); 3020 level = CompLevel_full_optimization + 1; 3021 } 3022 stats = &_scc_stats_per_level[level - 1]; 3023 } else { 3024 stats = &_stats_per_level[level - 1]; 3025 } 3026 stats->_made_not_entrant._count++; 3027 } 3028 } 3029 3030 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) { 3031 tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}", 3032 name, stats->bytes_per_second(), 3033 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, 3034 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, 3035 stats->_nmethods_size, stats->_nmethods_code_size); 3036 } 3037 3038 static void print_helper(outputStream* st, const char* name, CompilerStatistics::Data data, bool print_time = true) { 3039 if (data._count > 0) { 3040 st->print("; %s: %4u methods", name, data._count); 3041 if (print_time) { 3042 st->print(" (in %.3fs)", data._time.seconds()); 3043 } 3044 } 3045 } 3046 3047 static void print_tier_helper(outputStream* st, const char* prefix, int tier, CompilerStatistics* stats) { 3048 st->print(" %s%d: %5u methods", prefix, tier, stats->_standard._count); 3049 if (stats->_standard._count > 0) { 3050 st->print(" (in %.3fs)", stats->_standard._time.seconds()); 3051 } 3052 print_helper(st, "osr", stats->_osr); 3053 print_helper(st, "bailout", stats->_bailout); 3054 print_helper(st, "invalid", stats->_invalidated); 3055 print_helper(st, "not_entrant", stats->_made_not_entrant, false); 3056 st->cr(); 3057 } 3058 3059 static void print_queue_info(outputStream* st, CompileQueue* queue) { 3060 if (queue != nullptr) { 3061 MutexLocker ml(queue->lock()); 3062 3063 uint total_cnt = 0; 3064 uint active_cnt = 0; 3065 for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3066 guarantee(jt != nullptr, ""); 3067 if (jt->is_Compiler_thread()) { 3068 CompilerThread* ct = (CompilerThread*)jt; 3069 3070 guarantee(ct != nullptr, ""); 3071 if (ct->queue() == queue) { 3072 ++total_cnt; 3073 CompileTask* task = ct->task(); 3074 if (task != nullptr) { 3075 ++active_cnt; 3076 } 3077 } 3078 } 3079 } 3080 3081 st->print(" %s (%d active / %d total threads): %u tasks", 3082 queue->name(), active_cnt, total_cnt, queue->size()); 3083 if (queue->size() > 0) { 3084 uint counts[] = {0, 0, 0, 0, 0}; // T1 ... 
static void print_queue_info(outputStream* st, CompileQueue* queue) {
  if (queue != nullptr) {
    MutexLocker ml(queue->lock());

    uint total_cnt = 0;
    uint active_cnt = 0;
    for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) {
      guarantee(jt != nullptr, "");
      if (jt->is_Compiler_thread()) {
        CompilerThread* ct = (CompilerThread*)jt;

        guarantee(ct != nullptr, "");
        if (ct->queue() == queue) {
          ++total_cnt;
          CompileTask* task = ct->task();
          if (task != nullptr) {
            ++active_cnt;
          }
        }
      }
    }

    st->print(" %s (%d active / %d total threads): %u tasks",
              queue->name(), active_cnt, total_cnt, queue->size());
    if (queue->size() > 0) {
      uint counts[] = {0, 0, 0, 0, 0}; // T1 ... T5
      for (CompileTask* task = queue->first(); task != nullptr; task = task->next()) {
        int tier = task->comp_level();
        if (task->is_scc() && task->preload()) {
          assert(tier == CompLevel_full_optimization, "%d", tier);
          tier = CompLevel_full_optimization + 1;
        }
        counts[tier-1]++;
      }
      st->print(":");
      for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
        uint cnt = counts[tier-1];
        if (cnt > 0) {
          st->print(" T%d: %u tasks;", tier, cnt);
        }
      }
    }
    st->cr();

    // for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) {
    //   guarantee(jt != nullptr, "");
    //   if (jt->is_Compiler_thread()) {
    //     CompilerThread* ct = (CompilerThread*)jt;
    //
    //     guarantee(ct != nullptr, "");
    //     if (ct->queue() == queue) {
    //       ResourceMark rm;
    //       CompileTask* task = ct->task();
    //       st->print(" %s: ", ct->name_raw());
    //       if (task != nullptr) {
    //         task->print(st, nullptr, true /*short_form*/, false /*cr*/);
    //       }
    //       st->cr();
    //     }
    //   }
    // }
  }
}

void CompileBroker::print_statistics_on(outputStream* st) {
  st->print_cr(" Total: %u methods; %u bailouts, %u invalidated, %u non_entrant",
               _total_compile_count, _total_bailout_count, _total_invalidated_count, _total_not_entrant_count);
  for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
    print_tier_helper(st, "Tier", tier, &_stats_per_level[tier-1]);
  }
  st->cr();

  if (LoadCachedCode || StoreCachedCode) {
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
      if (tier != CompLevel_full_profile) {
        print_tier_helper(st, "SC T", tier, &_scc_stats_per_level[tier - 1]);
      }
    }
    st->cr();
  }

  print_queue_info(st, _c1_compile_queue);
  print_queue_info(st, _c2_compile_queue);
  print_queue_info(st, _c3_compile_queue);
  print_queue_info(st, _sc1_compile_queue);
  print_queue_info(st, _sc2_compile_queue);
}

void CompileBroker::print_times(bool per_compiler, bool aggregate) {
  if (per_compiler) {
    if (aggregate) {
      tty->cr();
      tty->print_cr("[%dms] Individual compiler times (for compiled methods only)", (int)tty->time_stamp().milliseconds());
      tty->print_cr("------------------------------------------------");
      tty->cr();
    }
    for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) {
      AbstractCompiler* comp = _compilers[i];
      if (comp != nullptr) {
        print_times(comp->name(), comp->stats());
      }
    }
    if (_scc_stats._standard._count > 0) {
      print_times("SC", &_scc_stats);
    }
    if (aggregate) {
      tty->cr();
      tty->print_cr("Individual compilation Tier times (for compiled methods only)");
      tty->print_cr("------------------------------------------------");
      tty->cr();
    }
    char tier_name[256];
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
      CompilerStatistics* stats = &_stats_per_level[tier-1];
      os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier);
      print_times(tier_name, stats);
    }
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
      CompilerStatistics* stats = &_scc_stats_per_level[tier-1];
      if (stats->_standard._bytes > 0) {
        os::snprintf_checked(tier_name, sizeof(tier_name), "SC T%d", tier);
        print_times(tier_name, stats);
      }
    }
  }

  if (!aggregate) {
    return;
  }

  elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation;
  elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation;
  elapsedTimer total_compilation = CompileBroker::_t_total_compilation;

  uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled;
  uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled;

  uint standard_compile_count = CompileBroker::_total_standard_compile_count;
  uint osr_compile_count = CompileBroker::_total_osr_compile_count;
  uint total_compile_count = CompileBroker::_total_compile_count;
  uint total_bailout_count = CompileBroker::_total_bailout_count;
  uint total_invalidated_count = CompileBroker::_total_invalidated_count;

  uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size;
  uint nmethods_size = CompileBroker::_sum_nmethod_size;

  tty->cr();
  tty->print_cr("Accumulated compiler times");
  tty->print_cr("----------------------------------------------------------");
  //0000000000111111111122222222223333333333444444444455555555556666666666
  //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds());
  tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s",
                standard_compilation.seconds(),
                standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count);
  tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_bailedout_compilation.seconds(),
                total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count);
  tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s",
                osr_compilation.seconds(),
                osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count);
  tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_invalidated_compilation.seconds(),
                total_invalidated_count == 0 ? 0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count);

  if (StoreCachedCode || LoadCachedCode) { // Check flags because SC cache could be closed already
    tty->cr();
    SCCache::print_timers_on(tty);
  }
  AbstractCompiler *comp = compiler(CompLevel_simple);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = compiler(CompLevel_full_optimization);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = _compilers[2];
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCICompiler *jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null());
    if (jvmci_comp != nullptr && jvmci_comp != comp) {
      tty->cr();
      jvmci_comp->print_timers();
    }
  }
#endif

  tty->cr();
  tty->print_cr(" Total compiled methods : %8u methods", total_compile_count);
  tty->print_cr(" Standard compilation : %8u methods", standard_compile_count);
  tty->print_cr(" On stack replacement : %8u methods", osr_compile_count);
  uint tcb = osr_bytes_compiled + standard_bytes_compiled;
  tty->print_cr(" Total compiled bytecodes : %8u bytes", tcb);
  tty->print_cr(" Standard compilation : %8u bytes", standard_bytes_compiled);
  tty->print_cr(" On stack replacement : %8u bytes", osr_bytes_compiled);
  double tcs = total_compilation.seconds();
  uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs);
  tty->print_cr(" Average compilation speed : %8u bytes/s", bps);
  tty->cr();
  tty->print_cr(" nmethod code size : %8u bytes", nmethods_code_size);
  tty->print_cr(" nmethod total size : %8u bytes", nmethods_size);
}

// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
  if (out == nullptr) out = tty;
  out->cr();
  out->print_cr("======================");
  out->print_cr(" General JIT info ");
  out->print_cr("======================");
  out->cr();
  out->print_cr(" JIT is : %7s", should_compile_new_jobs() ? "on" : "off");
  out->print_cr(" Compiler threads : %7d", (int)CICompilerCount);
  out->cr();
  out->print_cr("CodeCache overview");
  out->print_cr("--------------------------------------------------------");
  out->cr();
  out->print_cr(" Reserved size : %7zu KB", CodeCache::max_capacity() / K);
  out->print_cr(" Committed size : %7zu KB", CodeCache::capacity() / K);
  out->print_cr(" Unallocated capacity : %7zu KB", CodeCache::unallocated_capacity() / K);
  out->cr();
}

// Note: tty_lock must not be held upon entry to this function.
//       Print functions called from herein do "micro-locking" on tty_lock.
//       That's a tradeoff which keeps together important blocks of output.
//       At the same time, continuous tty_lock hold time is kept in check,
//       preventing concurrently printing threads from stalling a long time.
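// The 'function' argument selects which part of the CodeHeap analysis to run:
// "all", "aggregate"/"analyze", "UsedSpace", "FreeSpace", "MethodCount", "MethodSpace",
// "MethodAge", "MethodNames", or "discard" (see the strcmp checks below). This is
// typically reached via the Compiler.CodeHeap_Analytics jcmd diagnostic command.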
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
  TimeStamp ts_total;
  TimeStamp ts_global;
  TimeStamp ts;

  bool allFun = !strcmp(function, "all");
  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
  bool methodCount = !strcmp(function, "MethodCount") || allFun;
  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
  bool methodAge = !strcmp(function, "MethodAge") || allFun;
  bool methodNames = !strcmp(function, "MethodNames") || allFun;
  bool discard = !strcmp(function, "discard") || allFun;

  if (out == nullptr) {
    out = tty;
  }

  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
    out->cr();
    return;
  }

  ts_total.update(); // record starting point

  if (aggregate) {
    print_info(out);
  }

  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
  // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
  // When we request individual parts of the analysis via the jcmd interface, it is possible
  // that in between another thread (another jcmd user or the vm running into CodeCache OOM)
  // updated the aggregated data. We will then see a modified, but again consistent, view
  // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
  // a lock across user interaction.

  // We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock.
  // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
  // leading to an unnecessarily long hold time of the other locks we acquired before.
  ts.update(); // record starting point
  MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
  out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());

  // Holding the CodeCache_lock protects from concurrent alterations of the CodeCache.
  // Unfortunately, such protection is not sufficient:
  // When a new nmethod is created via ciEnv::register_method(), the
  // Compile_lock is taken first. After some initializations,
  // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
  // immediately (after finalizing the oop references). To lock out concurrent
  // modifiers, we have to grab both locks as well in the described sequence.
  //
  // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
  // for the entire duration of aggregation and printing. That makes sure we see
  // a consistent picture and do not run into issues caused by concurrent alterations.
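  // Take the locks below only if it is safe to do so: not while at a safepoint, and not
  // if this thread already owns them (HotSpot mutexes are not reentrant).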
  bool should_take_Compile_lock   = !SafepointSynchronize::is_at_safepoint() &&
                                    !Compile_lock->owned_by_self();
  bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
                                    !CodeCache_lock->owned_by_self();
  bool take_global_lock_1   =  allFun && should_take_Compile_lock;
  bool take_global_lock_2   =  allFun && should_take_CodeCache_lock;
  bool take_function_lock_1 = !allFun && should_take_Compile_lock;
  bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
  bool take_global_locks   = take_global_lock_1 || take_global_lock_2;
  bool take_function_locks = take_function_lock_1 || take_function_lock_2;

  ts_global.update(); // record starting point

  ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
  ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
    ts_global.update(); // record starting point
  }

  if (aggregate) {
    ts.update(); // record starting point
    ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag);
    ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
    }

    ts.update(); // record starting point
    CodeCache::aggregate(out, granularity);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
    }
  }

  if (usedSpace) CodeCache::print_usedSpace(out);
  if (freeSpace) CodeCache::print_freeSpace(out);
  if (methodCount) CodeCache::print_count(out);
  if (methodSpace) CodeCache::print_space(out);
  if (methodAge) CodeCache::print_age(out);
  if (methodNames) {
    if (allFun) {
      // print_names() can only be used safely if the locks have been continuously held
      // since aggregation began. That is true only for function "all".
      CodeCache::print_names(out);
    } else {
      out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
    }
  }
  if (discard) CodeCache::discard(out);

  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
  }
  out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
}