/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLinkedClassBulkLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "code/SCCache.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerEvent.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "gc/shared/memAllocator.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm.h"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
#include "runtime/atomic.hpp"
#include "runtime/escapeBarrier.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "services/management.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/nonblockingQueue.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
"jvmci/jvmciEnv.hpp" 92 #include "jvmci/jvmciRuntime.hpp" 93 #endif 94 95 #ifdef DTRACE_ENABLED 96 97 // Only bother with this argument setup if dtrace is available 98 99 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ 100 { \ 101 Symbol* klass_name = (method)->klass_name(); \ 102 Symbol* name = (method)->name(); \ 103 Symbol* signature = (method)->signature(); \ 104 HOTSPOT_METHOD_COMPILE_BEGIN( \ 105 (char *) comp_name, strlen(comp_name), \ 106 (char *) klass_name->bytes(), klass_name->utf8_length(), \ 107 (char *) name->bytes(), name->utf8_length(), \ 108 (char *) signature->bytes(), signature->utf8_length()); \ 109 } 110 111 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ 112 { \ 113 Symbol* klass_name = (method)->klass_name(); \ 114 Symbol* name = (method)->name(); \ 115 Symbol* signature = (method)->signature(); \ 116 HOTSPOT_METHOD_COMPILE_END( \ 117 (char *) comp_name, strlen(comp_name), \ 118 (char *) klass_name->bytes(), klass_name->utf8_length(), \ 119 (char *) name->bytes(), name->utf8_length(), \ 120 (char *) signature->bytes(), signature->utf8_length(), (success)); \ 121 } 122 123 #else // ndef DTRACE_ENABLED 124 125 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) 126 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) 127 128 #endif // ndef DTRACE_ENABLED 129 130 bool CompileBroker::_initialized = false; 131 bool CompileBroker::_replay_initialized = false; 132 volatile bool CompileBroker::_should_block = false; 133 volatile int CompileBroker::_print_compilation_warning = 0; 134 volatile jint CompileBroker::_should_compile_new_jobs = run_compilation; 135 136 // The installed compiler(s) 137 AbstractCompiler* CompileBroker::_compilers[3]; 138 139 // The maximum numbers of compiler threads to be determined during startup. 140 int CompileBroker::_c1_count = 0; 141 int CompileBroker::_c2_count = 0; 142 int CompileBroker::_c3_count = 0; 143 int CompileBroker::_sc_count = 0; 144 145 // An array of compiler names as Java String objects 146 jobject* CompileBroker::_compiler1_objects = nullptr; 147 jobject* CompileBroker::_compiler2_objects = nullptr; 148 jobject* CompileBroker::_compiler3_objects = nullptr; 149 jobject* CompileBroker::_sc_objects = nullptr; 150 151 CompileLog** CompileBroker::_compiler1_logs = nullptr; 152 CompileLog** CompileBroker::_compiler2_logs = nullptr; 153 CompileLog** CompileBroker::_compiler3_logs = nullptr; 154 CompileLog** CompileBroker::_sc_logs = nullptr; 155 156 // These counters are used to assign an unique ID to each compilation. 
volatile jint CompileBroker::_compilation_id = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;
volatile jint CompileBroker::_native_compilation_id = 0;

// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = nullptr;
PerfCounter* CompileBroker::_perf_osr_compilation = nullptr;
PerfCounter* CompileBroker::_perf_standard_compilation = nullptr;

PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr;
PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr;
PerfCounter* CompileBroker::_perf_total_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr;

PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr;

PerfStringVariable* CompileBroker::_perf_last_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_type = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_size = nullptr;
PerfVariable* CompileBroker::_perf_last_failed_type = nullptr;
PerfVariable* CompileBroker::_perf_last_invalidated_type = nullptr;

// Timers and counters for generating statistics
elapsedTimer CompileBroker::_t_total_compilation;
elapsedTimer CompileBroker::_t_osr_compilation;
elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;

uint CompileBroker::_total_bailout_count = 0;
uint CompileBroker::_total_invalidated_count = 0;
uint CompileBroker::_total_not_entrant_count = 0;
uint CompileBroker::_total_compile_count = 0;
uint CompileBroker::_total_osr_compile_count = 0;
uint CompileBroker::_total_standard_compile_count = 0;
uint CompileBroker::_total_compiler_stopped_count = 0;
uint CompileBroker::_total_compiler_restarted_count = 0;

uint CompileBroker::_sum_osr_bytes_compiled = 0;
uint CompileBroker::_sum_standard_bytes_compiled = 0;
uint CompileBroker::_sum_nmethod_size = 0;
uint CompileBroker::_sum_nmethod_code_size = 0;

jlong CompileBroker::_peak_compilation_time = 0;

CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
CompilerStatistics CompileBroker::_scc_stats;
CompilerStatistics CompileBroker::_scc_stats_per_level[CompLevel_full_optimization + 1];

CompileQueue* CompileBroker::_c3_compile_queue = nullptr;
CompileQueue* CompileBroker::_c2_compile_queue = nullptr;
CompileQueue* CompileBroker::_c1_compile_queue = nullptr;
CompileQueue* CompileBroker::_sc1_compile_queue = nullptr;
CompileQueue* CompileBroker::_sc2_compile_queue = nullptr;

bool compileBroker_init() {
  if (LogEvents) {
    CompilationLog::init();
  }

  // init directives stack, adding default directive
  DirectivesStack::init();

  if (DirectivesParser::has_file()) {
    return DirectivesParser::parse_from_flag();
  } else if (CompilerDirectivesPrint) {
    // Print default directive even when no other was added
    DirectivesStack::print(tty);
  }

  return true;
}

CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
  CompilerThread* thread = CompilerThread::current();
  thread->set_task(task);
  CompileLog* log = thread->log();
  if (log != nullptr && !task->is_unloaded()) task->log_task_start(log);
}

CompileTaskWrapper::~CompileTaskWrapper() {
  CompilerThread* thread = CompilerThread::current();
  CompileTask* task = thread->task();
  CompileLog* log = thread->log();
  AbstractCompiler* comp = thread->compiler();
  if (log != nullptr && !task->is_unloaded()) task->log_task_done(log);
  thread->set_task(nullptr);
  thread->set_env(nullptr);
  if (task->is_blocking()) {
    bool free_task = false;
    {
      MutexLocker notifier(thread, task->lock());
      task->mark_complete();
#if INCLUDE_JVMCI
      if (comp->is_jvmci()) {
        if (!task->has_waiter()) {
          // The waiting thread timed out and thus did not free the task.
          free_task = true;
        }
        task->set_blocking_jvmci_compile_state(nullptr);
      }
#endif
      if (!free_task) {
        // Notify the waiting thread that the compilation has completed
        // so that it can free the task.
        task->lock()->notify_all();
      }
    }
    if (free_task) {
      // The task can only be freed once the task lock is released.
      CompileTask::free(task);
    }
  } else {
    task->mark_complete();

    // By convention, the compiling thread is responsible for
    // recycling a non-blocking CompileTask.
    CompileTask::free(task);
  }
}

/**
 * Check if a CompilerThread can be removed and update count if requested.
 */
bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
  assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
  if (!ReduceNumberOfCompilerThreads) return false;

  if (RecompilationPolicy::have_recompilation_work()) return false;

  AbstractCompiler *compiler = ct->compiler();
  int compiler_count = compiler->num_compiler_threads();
  bool c1 = compiler->is_c1();

  // Keep at least 1 compiler thread of each type.
  if (compiler_count < 2) return false;

  // Keep thread alive for at least some time.
  if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;

#if INCLUDE_JVMCI
  if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
    // Handles for JVMCI thread objects may get released concurrently.
    if (do_it) {
      assert(CompileThread_lock->owner() == ct, "must be holding lock");
    } else {
      // Skip check if it's the last thread and let caller check again.
      return true;
    }
  }
#endif

  // We only allow the last compiler thread of each type to get removed.
  jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
                             : compiler2_object(compiler_count - 1);
  if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
    if (do_it) {
      assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
      compiler->set_num_compiler_threads(compiler_count - 1);
#if INCLUDE_JVMCI
      if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
        // Old j.l.Thread object can die when no longer referenced elsewhere.
        JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
        _compiler2_objects[compiler_count - 1] = nullptr;
      }
#endif
    }
    return true;
  }
  return false;
}

/**
 * Add a CompileTask to a CompileQueue.
 */
void CompileQueue::add(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");

  task->set_next(nullptr);
  task->set_prev(nullptr);

  if (_last == nullptr) {
    // The compile queue is empty.
    assert(_first == nullptr, "queue is empty");
    _first = task;
    _last = task;
  } else {
    // Append the task to the queue.
    assert(_last->next() == nullptr, "not last");
    _last->set_next(task);
    task->set_prev(_last);
    _last = task;
  }
  ++_size;
  ++_total_added;
  if (_size > _peak_size) {
    _peak_size = _size;
  }

  // Mark the method as being in the compile queue.
  task->method()->set_queued_for_compilation();

  task->mark_queued(os::elapsed_counter());

  if (CIPrintCompileQueue) {
    print_tty();
  }

  if (LogCompilation && xtty != nullptr) {
    task->log_task_queued();
  }

  if (TrainingData::need_data() &&
      !CDSConfig::is_dumping_final_static_archive()) { // FIXME: !!! MetaspaceShared::preload_and_dump() temporarily enables RecordTraining !!!
    CompileTrainingData* tdata = CompileTrainingData::make(task);
    if (tdata != nullptr) {
      task->set_training_data(tdata);
    }
  }

  // Notify CompilerThreads that a task is available.
  _lock->notify_all();
}

void CompileQueue::add_pending(CompileTask* task) {
  assert(_lock->owned_by_self() == false, "must NOT own lock");
  assert(UseLockFreeCompileQueues, "");
  task->method()->set_queued_for_compilation();
  _queue.push(*task);
  // FIXME: additional coordination needed? e.g., is it possible for compiler thread to block w/o processing pending tasks?
  if (is_empty()) {
    MutexLocker ml(_lock);
    _lock->notify_all();
  }
}

static bool process_pending(CompileTask* task) {
  // guarantee(task->method()->queued_for_compilation(), "");
  if (task->is_unloaded()) {
    return true; // unloaded
  }
  task->method()->set_queued_for_compilation(); // FIXME
  if (task->method()->pending_queue_processed()) {
    return true; // already queued
  }
  // Mark the method as being in the compile queue.
  task->method()->set_pending_queue_processed();
  if (CompileBroker::compilation_is_complete(task->method(), task->osr_bci(), task->comp_level(),
                                             task->requires_online_compilation(), task->compile_reason())) {
    return true; // already compiled
  }
  return false; // active
}

void CompileQueue::transfer_pending() {
  assert(_lock->owned_by_self(), "must own lock");

  CompileTask* task;
  while ((task = _queue.pop()) != nullptr) {
    bool is_stale = process_pending(task);
    if (is_stale) {
      task->set_next(_first_stale);
      task->set_prev(nullptr);
      _first_stale = task;
    } else {
      add(task);
    }
  }
}

/**
 * Empties compilation queue by putting all compilation tasks onto
 * a freelist. Furthermore, the method wakes up all threads that are
 * waiting on a compilation task to finish. This can happen if background
 * compilation is disabled.
 */
void CompileQueue::free_all() {
  MutexLocker mu(_lock);
  transfer_pending();

  CompileTask* next = _first;

  // Iterate over all tasks in the compile queue
  while (next != nullptr) {
    CompileTask* current = next;
    next = current->next();
    bool found_waiter = false;
    {
      MutexLocker ct_lock(current->lock());
      assert(current->waiting_for_completion_count() <= 1, "more than one thread are waiting for task");
      if (current->waiting_for_completion_count() > 0) {
        // If another thread waits for this task, we must wake them up
        // so they will stop waiting and free the task.
        current->lock()->notify();
        found_waiter = true;
      }
    }
    if (!found_waiter) {
      // If no one was waiting for this task, we need to free it ourselves. In this case, the task
      // is also certainly unlocked, because, again, there is no waiter.
      // Otherwise, by convention, it's the waiter's responsibility to free the task.
      // Put the task back on the freelist.
      CompileTask::free(current);
    }
  }
  _first = nullptr;
  _last = nullptr;

  // Wake up all threads that block on the queue.
  _lock->notify_all();
}

/**
 * Get the next CompileTask from a CompileQueue
 */
CompileTask* CompileQueue::get(CompilerThread* thread) {
  // save methods from RedefineClasses across safepoint
  // across compile queue lock below.
  methodHandle save_method;
  methodHandle save_hot_method;

  MonitorLocker locker(_lock);
  transfer_pending();

  RecompilationPolicy::sample_load_average();

  // If _first is null we have no more compile jobs. There are two reasons for
  // having no compile jobs: First, we compiled everything we wanted. Second,
  // we ran out of code cache so compilation has been disabled. In the latter
  // case we perform code cache sweeps to free memory such that we can re-enable
  // compilation.
  while (_first == nullptr) {
    // Exit loop if compilation is disabled forever
    if (CompileBroker::is_compilation_disabled_forever()) {
      return nullptr;
    }

    AbstractCompiler* compiler = thread->compiler();
    guarantee(compiler != nullptr, "Compiler object must exist");
    compiler->on_empty_queue(this, thread);
    if (_first != nullptr) {
      // The call to on_empty_queue may have temporarily unlocked the MCQ lock
      // so check again whether any tasks were added to the queue.
      break;
    }

    // If we have added stale tasks, there might be waiters that want
    // the notification these tasks have failed. Normally, this would
    // be done by a compiler thread that would perform the purge at
    // the end of some compilation. But, if the compile queue is empty,
    // there is no guarantee compilers would run and do the purge.
    // Do the purge here and now to unblock the waiters.
    // Perform this until we run out of stale tasks.
    while (_first_stale != nullptr) {
      purge_stale_tasks();
    }
    if (_first != nullptr) {
      // Purge stale tasks may have transferred some new tasks,
      // so check again.
      break;
    }

    // If there are no compilation tasks and we can compile new jobs
    // (i.e., there is enough free space in the code cache) there is
    // no need to invoke the GC.
    // We need a timed wait here, since compiler threads can exit if compilation
    // is disabled forever.
    // We use 5 seconds wait time; the exiting of compiler threads
    // is not critical and we do not want idle compiler threads to wake up too often.
    locker.wait(5*1000);

    transfer_pending(); // reacquired lock

    if (RecompilationPolicy::have_recompilation_work()) return nullptr;

    if (UseDynamicNumberOfCompilerThreads && _first == nullptr) {
      // Still nothing to compile. Give caller a chance to stop this thread.
      if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr;
    }
  }

  if (CompileBroker::is_compilation_disabled_forever()) {
    return nullptr;
  }

  CompileTask* task;
  {
    NoSafepointVerifier nsv;
    task = CompilationPolicy::select_task(this, thread);
    if (task != nullptr) {
      task = task->select_for_compilation();
    }
  }

  if (task != nullptr) {
    // Save method pointers across unlock safepoint. The task is removed from
    // the compilation queue, which is walked during RedefineClasses.
    Thread* thread = Thread::current();
    save_method = methodHandle(thread, task->method());
    save_hot_method = methodHandle(thread, task->hot_method());

    remove(task);
  }
  purge_stale_tasks(); // may temporarily release MCQ lock
  return task;
}

// Clean & deallocate stale compile tasks.
// Temporarily releases MethodCompileQueue lock.
void CompileQueue::purge_stale_tasks() {
  assert(_lock->owned_by_self(), "must own lock");
  if (_first_stale != nullptr) {
    // Stale tasks are purged when MCQ lock is released,
    // but _first_stale updates are protected by MCQ lock.
    // Once task processing starts and MCQ lock is released,
    // other compiler threads can reuse _first_stale.
    CompileTask* head = _first_stale;
    _first_stale = nullptr;
    {
      MutexUnlocker ul(_lock);
      for (CompileTask* task = head; task != nullptr; ) {
        CompileTask* next_task = task->next();
        CompileTaskWrapper ctw(task); // Frees the task
        task->set_failure_reason("stale task");
        task = next_task;
      }
    }
    transfer_pending(); // transfer pending after reacquiring MCQ lock
  }
}

void CompileQueue::remove(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  if (task->prev() != nullptr) {
    task->prev()->set_next(task->next());
  } else {
    // max is the first element
    assert(task == _first, "Sanity");
    _first = task->next();
  }

  if (task->next() != nullptr) {
    task->next()->set_prev(task->prev());
  } else {
    // max is the last element
    assert(task == _last, "Sanity");
    _last = task->prev();
  }
  --_size;
  ++_total_removed;
}

void CompileQueue::remove_and_mark_stale(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  remove(task);

  // Enqueue the task for reclamation (should be done outside MCQ lock)
  task->set_next(_first_stale);
  task->set_prev(nullptr);
  _first_stale = task;
}

// methods in the compile queue need to be marked as used on the stack
// so that they don't get reclaimed by Redefine Classes
void CompileQueue::mark_on_stack() {
  for (CompileTask* task = _first; task != nullptr; task = task->next()) {
    task->mark_on_stack();
  }
  for (CompileTask* task = _queue.first(); !_queue.is_end(task); task = task->next()) {
    assert(task != nullptr, "");
    task->mark_on_stack();
  }
}


CompileQueue* CompileBroker::compile_queue(int comp_level, bool is_scc) {
  if (is_c2_compile(comp_level)) return ((is_scc && (_sc_count > 0)) ? _sc2_compile_queue : _c2_compile_queue);
  if (is_c1_compile(comp_level)) return ((is_scc && (_sc_count > 0)) ? _sc1_compile_queue : _c1_compile_queue);
  return nullptr;
}

CompileQueue* CompileBroker::c1_compile_queue() {
  return _c1_compile_queue;
}

CompileQueue* CompileBroker::c2_compile_queue() {
  return _c2_compile_queue;
}

void CompileBroker::print_compile_queues(outputStream* st) {
  st->print_cr("Current compiles: ");

  char buf[2000];
  int buflen = sizeof(buf);
  Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);

  st->cr();
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->print(st);
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->print(st);
  }
  if (_c3_compile_queue != nullptr) {
    _c3_compile_queue->print(st);
  }
  if (_sc1_compile_queue != nullptr) {
    _sc1_compile_queue->print(st);
  }
  if (_sc2_compile_queue != nullptr) {
    _sc2_compile_queue->print(st);
  }
}

void CompileQueue::print(outputStream* st) {
  assert_locked_or_safepoint(_lock);
  st->print_cr("%s:", name());
  CompileTask* task = _first;
  if (task == nullptr) {
    st->print_cr("Empty");
  } else {
    while (task != nullptr) {
      task->print(st, nullptr, true, true);
      task = task->next();
    }
  }
  st->cr();
}

void CompileQueue::print_tty() {
  stringStream ss;
  // Dump the compile queue into a buffer before locking the tty
  print(&ss);
  {
    ttyLocker ttyl;
    tty->print("%s", ss.freeze());
  }
}

CompilerCounters::CompilerCounters() {
  _current_method[0] = '\0';
  _compile_type = CompileBroker::no_compile;
}

#if INCLUDE_JFR && COMPILER2_OR_JVMCI
// It appends new compiler phase names to the growable array phase_names (a new CompilerPhaseType mapping
// in compiler/compilerEvent.cpp) and registers it with its serializer.
//
// c2 uses explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if c2 is used, it should always be registered first.
// This function is called during vm initialization.
static void register_jfr_phasetype_serializer(CompilerType compiler_type) {
  ResourceMark rm;
  static bool first_registration = true;
  if (compiler_type == compiler_jvmci) {
    CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false);
    first_registration = false;
#ifdef COMPILER2
  } else if (compiler_type == compiler_c2) {
    assert(first_registration, "invariant"); // c2 must be registered first.
    for (int i = 0; i < PHASE_NUM_TYPES; i++) {
      const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i);
      CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false);
    }
    first_registration = false;
#endif // COMPILER2
  }
}
#endif // INCLUDE_JFR && COMPILER2_OR_JVMCI

// ------------------------------------------------------------------
// CompileBroker::compilation_init
//
// Initialize the Compilation object
void CompileBroker::compilation_init(JavaThread* THREAD) {
  // No need to initialize compilation system if we do not use it.
  if (!UseCompiler) {
    return;
  }
  // Set the interface to the current compiler(s).
  _c1_count = CompilationPolicy::c1_count();
  _c2_count = CompilationPolicy::c2_count();
  _c3_count = CompilationPolicy::c3_count();
  _sc_count = CompilationPolicy::sc_count();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // This is creating a JVMCICompiler singleton.
    JVMCICompiler* jvmci = new JVMCICompiler();

    if (UseJVMCICompiler) {
      _compilers[1] = jvmci;
      if (FLAG_IS_DEFAULT(JVMCIThreads)) {
        if (BootstrapJVMCI) {
          // JVMCI will bootstrap so give it more threads
          _c2_count = MIN2(32, os::active_processor_count());
        }
      } else {
        _c2_count = JVMCIThreads;
      }
      if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
      } else {
#ifdef COMPILER1
        _c1_count = JVMCIHostThreads;
#endif // COMPILER1
      }
#ifdef COMPILER2
      if (SCCache::is_on() && (_c3_count > 0)) {
        _compilers[2] = new C2Compiler();
      }
#endif
    }
  }
#endif // INCLUDE_JVMCI

#ifdef COMPILER1
  if (_c1_count > 0) {
    _compilers[0] = new Compiler();
  }
#endif // COMPILER1

#ifdef COMPILER2
  if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
    if (_c2_count > 0) {
      _compilers[1] = new C2Compiler();
      // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit.
      // idToPhase mapping for c2 is in opto/phasetype.hpp
      JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);)
    }
  }
#endif // COMPILER2

#if INCLUDE_JVMCI
  // Register after c2 registration.
  // JVMCI CompilerPhaseType idToPhase mapping is dynamic.
  if (EnableJVMCI) {
    JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);)
  }
#endif // INCLUDE_JVMCI

  if (CompilerOracle::should_collect_memstat()) {
    CompilationMemoryStatistic::initialize();
  }

  // Start the compiler thread(s)
  init_compiler_threads();
  // totalTime performance counter is always created as it is required
  // by the implementation of java.lang.management.CompilationMXBean.
  {
    // Ensure OOM leads to vm_exit_during_initialization.
    EXCEPTION_MARK;
    _perf_total_compilation =
        PerfDataManager::create_counter(JAVA_CI, "totalTime",
                                        PerfData::U_Ticks, CHECK);
  }

  if (UsePerfData) {

    EXCEPTION_MARK;

    // create the jvmstat performance counters
    _perf_osr_compilation =
        PerfDataManager::create_counter(SUN_CI, "osrTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_standard_compilation =
        PerfDataManager::create_counter(SUN_CI, "standardTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_total_bailout_count =
        PerfDataManager::create_counter(SUN_CI, "totalBailouts",
                                        PerfData::U_Events, CHECK);

    _perf_total_invalidated_count =
        PerfDataManager::create_counter(SUN_CI, "totalInvalidates",
                                        PerfData::U_Events, CHECK);

    _perf_total_compile_count =
        PerfDataManager::create_counter(SUN_CI, "totalCompiles",
                                        PerfData::U_Events, CHECK);
    _perf_total_osr_compile_count =
        PerfDataManager::create_counter(SUN_CI, "osrCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_total_standard_compile_count =
        PerfDataManager::create_counter(SUN_CI, "standardCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_sum_osr_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "osrBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_standard_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "standardBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_code_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_last_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_failed_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_invalidated_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_compile_type =
        PerfDataManager::create_variable(SUN_CI, "lastType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_compile_size =
        PerfDataManager::create_variable(SUN_CI, "lastSize",
                                         PerfData::U_Bytes,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);


    _perf_last_failed_type =
        PerfDataManager::create_variable(SUN_CI, "lastFailedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_invalidated_type =
        PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);
  }

  log_info(scc, init)("CompileBroker is initialized");
  _initialized = true;
}

Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
  Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK_NH);
  return thread_oop;
}

void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) {
  CompilationPolicy::replay_training_at_init_loop(thread);
}

#if defined(ASSERT) && COMPILER2_OR_JVMCI
// Entry for DeoptimizeObjectsALotThread.
// The threads are started in CompileBroker::init_compiler_threads()
// iff DeoptimizeObjectsALot is enabled
void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
  DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread);
  bool enter_single_loop;
  {
    MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag);
    static int single_thread_count = 0;
    enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle;
  }
  if (enter_single_loop) {
    dt->deoptimize_objects_alot_loop_single();
  } else {
    dt->deoptimize_objects_alot_loop_all();
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets a single thread which is selected round robin.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() {
  HandleMark hm(this);
  while (true) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) {
      { // Begin new scope for escape barrier
        HandleMarkCleaner hmc(this);
        ResourceMark rm(this);
        EscapeBarrier eb(true, this, deoptee_thread);
        eb.deoptimize_objects(100);
      }
      // Now sleep after the escape barrier's destructor resumed deoptee_thread.
      sleep(DeoptimizeObjectsALotInterval);
    }
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets all java threads in the vm at once.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {
  HandleMark hm(this);
  while (true) {
    { // Begin new scope for escape barrier
      HandleMarkCleaner hmc(this);
      ResourceMark rm(this);
      EscapeBarrier eb(true, this);
      eb.deoptimize_objects_all_threads();
    }
    // Now sleep after the escape barrier's destructor resumed the java threads.
    sleep(DeoptimizeObjectsALotInterval);
  }
}
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI


JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
  Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));

  if (java_lang_Thread::thread(thread_oop()) != nullptr) {
    assert(type == compiler_t, "should only happen with reused compiler threads");
    // The compiler thread hasn't actually exited yet so don't try to reuse it
    return nullptr;
  }

  JavaThread* new_thread = nullptr;
  switch (type) {
    case compiler_t:
      assert(comp != nullptr, "Compiler instance missing.");
      if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
        CompilerCounters* counters = new CompilerCounters();
        new_thread = new CompilerThread(queue, counters);
      }
      break;
#if defined(ASSERT) && COMPILER2_OR_JVMCI
    case deoptimizer_t:
      new_thread = new DeoptimizeObjectsALotThread();
      break;
#endif // ASSERT
    case training_replay_t:
      new_thread = new TrainingReplayThread();
      break;
    default:
      ShouldNotReachHere();
  }

  // At this point the new CompilerThread data-races with this startup
  // thread (which is the main thread and NOT the VM thread).
  // This means Java bytecodes being executed at startup can
  // queue compile jobs which will run at whatever default priority the
  // newly created CompilerThread runs at.


  // At this point it may be possible that no osthread was created for the
  // JavaThread due to lack of resources. We will handle that failure below.
  // Also check new_thread so that static analysis is happy.
  if (new_thread != nullptr && new_thread->osthread() != nullptr) {

    if (type == compiler_t) {
      CompilerThread::cast(new_thread)->set_compiler(comp);
    }

    // Note that we cannot call os::set_priority because it expects Java
    // priorities and we are *explicitly* using OS priorities so that it's
    // possible to set the compiler thread priority higher than any Java
    // thread.

    int native_prio = CompilerThreadPriority;
    if (native_prio == -1) {
      if (UseCriticalCompilerThreadPriority) {
        native_prio = os::java_to_os_priority[CriticalPriority];
      } else {
        native_prio = os::java_to_os_priority[NearMaxPriority];
      }
    }
    os::set_native_priority(new_thread, native_prio);

    // Note that this only sets the JavaThread _priority field, which by
    // definition is limited to Java priorities and not OS priorities.
    JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority);

  } else { // osthread initialization failure
    if (UseDynamicNumberOfCompilerThreads && type == compiler_t
        && comp->num_compiler_threads() > 0) {
      // The new thread is not known to Thread-SMR yet so we can just delete.
      delete new_thread;
      return nullptr;
    } else {
      vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                    os::native_thread_creation_failed_msg());
    }
  }

  os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)

  return new_thread;
}

static bool trace_compiler_threads() {
  LogTarget(Debug, jit, thread) lt;
  return TraceCompilerThreads || lt.is_enabled();
}

static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) {
  char name_buffer[256];
  os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i);
  Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL);
  return JNIHandles::make_global(thread_oop);
}

static void print_compiler_threads(stringStream& msg) {
  if (TraceCompilerThreads) {
    tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string());
  }
  LogTarget(Debug, jit, thread) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s", msg.as_string());
  }
}

static void print_compiler_thread(JavaThread *ct) {
  if (trace_compiler_threads()) {
    ResourceMark rm;
    ThreadsListHandle tlh;  // name() depends on the TLH.
    assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
    stringStream msg;
    msg.print("Added initial compiler thread %s", ct->name());
    print_compiler_threads(msg);
  }
}

void CompileBroker::init_compiler_threads() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
#if !defined(ZERO)
  assert(_c2_count > 0 || _c1_count > 0, "No compilers?");
#endif // !ZERO
  // Initialize the compilation queue
  if (_c2_count > 0) {
"JVMCI compile queue" :) "C2 compile queue"; 1099 _c2_compile_queue = new CompileQueue(name, MethodCompileQueueC2_lock); 1100 _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler); 1101 _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler); 1102 } 1103 if (_c1_count > 0) { 1104 _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueueC1_lock); 1105 _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler); 1106 _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler); 1107 } 1108 1109 if (_c3_count > 0) { 1110 const char* name = "C2 compile queue"; 1111 _c3_compile_queue = new CompileQueue(name, MethodCompileQueueC3_lock); 1112 _compiler3_objects = NEW_C_HEAP_ARRAY(jobject, _c3_count, mtCompiler); 1113 _compiler3_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c3_count, mtCompiler); 1114 } 1115 if (_sc_count > 0) { 1116 if (_c1_count > 0) { // C1 is present 1117 _sc1_compile_queue = new CompileQueue("C1 SC compile queue", MethodCompileQueueSC1_lock); 1118 } 1119 if (_c2_count > 0) { // C2 is present 1120 _sc2_compile_queue = new CompileQueue("C2 SC compile queue", MethodCompileQueueSC2_lock); 1121 } 1122 _sc_objects = NEW_C_HEAP_ARRAY(jobject, _sc_count, mtCompiler); 1123 _sc_logs = NEW_C_HEAP_ARRAY(CompileLog*, _sc_count, mtCompiler); 1124 } 1125 char name_buffer[256]; 1126 1127 for (int i = 0; i < _c2_count; i++) { 1128 // Create a name for our thread. 1129 jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK); 1130 _compiler2_objects[i] = thread_handle; 1131 _compiler2_logs[i] = nullptr; 1132 1133 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1134 JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD); 1135 assert(ct != nullptr, "should have been handled for initial thread"); 1136 _compilers[1]->set_num_compiler_threads(i + 1); 1137 print_compiler_thread(ct); 1138 } 1139 } 1140 1141 for (int i = 0; i < _c1_count; i++) { 1142 // Create a name for our thread. 1143 jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK); 1144 _compiler1_objects[i] = thread_handle; 1145 _compiler1_logs[i] = nullptr; 1146 1147 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1148 JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD); 1149 assert(ct != nullptr, "should have been handled for initial thread"); 1150 _compilers[0]->set_num_compiler_threads(i + 1); 1151 print_compiler_thread(ct); 1152 } 1153 } 1154 1155 for (int i = 0; i < _c3_count; i++) { 1156 // Create a name for our thread. 
    os::snprintf_checked(name_buffer, sizeof(name_buffer), "C2 CompilerThread%d", i);
    Handle thread_oop = create_thread_oop(name_buffer, CHECK);
    jobject thread_handle = JNIHandles::make_global(thread_oop);
    _compiler3_objects[i] = thread_handle;
    _compiler3_logs[i] = nullptr;

    JavaThread *ct = make_thread(compiler_t, thread_handle, _c3_compile_queue, _compilers[2], THREAD);
    assert(ct != nullptr, "should have been handled for initial thread");
    _compilers[2]->set_num_compiler_threads(i + 1);
    print_compiler_thread(ct);
  }

  if (_sc_count > 0) {
    int i = 0;
    if (_c1_count > 0) { // C1 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d SC CompilerThread", 1);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _sc_objects[i] = thread_handle;
      _sc_logs[i] = nullptr;
      i++;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _sc1_compile_queue, _compilers[0], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
    if (_c2_count > 0) { // C2 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d SC CompilerThread", 2);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _sc_objects[i] = thread_handle;
      _sc_logs[i] = nullptr;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _sc2_compile_queue, _compilers[1], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
  }

  if (UsePerfData) {
    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count + _c3_count, CHECK);
  }

#if defined(ASSERT) && COMPILER2_OR_JVMCI
  if (DeoptimizeObjectsALot) {
    // Initialize and start the object deoptimizer threads
    const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll;
    for (int count = 0; count < total_count; count++) {
      Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK);
      jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
      make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD);
    }
  }
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI
}

void CompileBroker::init_training_replay() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
  if (TrainingData::have_data()) {
    if (UseConcurrentTrainingReplay) {
      Handle thread_oop = create_thread_oop("Training replay thread", CHECK);
      jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
      make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD);
    }
    _replay_initialized = true;
  }
}

void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {

  int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0;
  const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4;

  // Quick check if we already have enough compiler threads without taking the lock.
  // Numbers may change concurrently, so we read them again after we have the lock.
  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread);
  }
  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread);
  }
  if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return;

  // Now, we do the more expensive operations.
  julong free_memory = os::free_memory();
  // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
  size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
         available_cc_p  = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);

  // Only attempt to start additional threads if the lock is free.
  if (!CompileThread_lock->try_lock()) return;

  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN4(_c2_count,
                        _c2_compile_queue->size() / c2_tasks_per_thread,
                        (int)(free_memory / (200*M)),
                        (int)(available_cc_np / (128*K)));

    for (int i = old_c2_count; i < new_c2_count; i++) {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) {
        // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their
        // existence is completely hidden from the rest of the VM (and those compiler threads can't
        // call Java code to do the creation anyway).
        //
        // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we
        // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads. For
        // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary
        // coupling with Java.
        if (!THREAD->can_call_java()) break;
        char name_buffer[256];
        os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i);
        Handle thread_oop;
        {
          // We have to give up the lock temporarily for the Java calls.
          MutexUnlocker mu(CompileThread_lock);
          thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD);
        }
        if (HAS_PENDING_EXCEPTION) {
          if (trace_compiler_threads()) {
            ResourceMark rm;
            stringStream msg;
            msg.print_cr("JVMCI compiler thread creation failed:");
            PENDING_EXCEPTION->print_on(&msg);
            print_compiler_threads(msg);
          }
          CLEAR_PENDING_EXCEPTION;
          break;
        }
        // Check if another thread has beaten us during the Java calls.
        if (get_c2_thread_count() != i) break;
        jobject thread_handle = JNIHandles::make_global(thread_oop);
        assert(compiler2_object(i) == nullptr, "Old one must be released!");
        _compiler2_objects[i] = thread_handle;
      }
#endif
      guarantee(compiler2_object(i) != nullptr, "Thread oop must exist");
      JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD);
      if (ct == nullptr) break;
      _compilers[1]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh;  // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_np/M));
        print_compiler_threads(msg);
      }
    }
  }

  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN4(_c1_count,
                        _c1_compile_queue->size() / c1_tasks_per_thread,
                        (int)(free_memory / (100*M)),
                        (int)(available_cc_p / (128*K)));

    for (int i = old_c1_count; i < new_c1_count; i++) {
      JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD);
      if (ct == nullptr) break;
      _compilers[0]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh;  // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_p/M));
        print_compiler_threads(msg);
      }
    }
  }

  CompileThread_lock->unlock();
}


/**
 * Set the methods on the stack as on_stack so that redefine classes doesn't
 * reclaim them. This method is executed at a safepoint.
 */
void CompileBroker::mark_on_stack() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
  // Since we are at a safepoint, we do not need a lock to access
  // the compile queues.
  if (_c3_compile_queue != nullptr) {
    _c3_compile_queue->mark_on_stack();
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->mark_on_stack();
  }
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->mark_on_stack();
  }
  if (_sc1_compile_queue != nullptr) {
    _sc1_compile_queue->mark_on_stack();
  }
  if (_sc2_compile_queue != nullptr) {
    _sc2_compile_queue->mark_on_stack();
  }
}

// ------------------------------------------------------------------
// CompileBroker::compile_method
//
// Request compilation of a method.
void CompileBroker::compile_method_base(const methodHandle& method,
                                        int osr_bci,
                                        int comp_level,
                                        const methodHandle& hot_method,
                                        int hot_count,
                                        CompileTask::CompileReason compile_reason,
                                        bool requires_online_compilation,
                                        bool blocking,
                                        Thread* thread) {
  guarantee(!method->is_abstract(), "cannot compile abstract methods");
  assert(method->method_holder()->is_instance_klass(),
         "sanity check");
  assert(!method->method_holder()->is_not_initialized() ||
         compile_reason == CompileTask::Reason_Preload ||
         compile_reason == CompileTask::Reason_Precompile ||
         compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
  assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");

  if (CIPrintRequests) {
    tty->print("request: ");
    method->print_short_name(tty);
    if (osr_bci != InvocationEntryBci) {
      tty->print(" osr_bci: %d", osr_bci);
    }
    tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count);
    if (!hot_method.is_null()) {
      tty->print(" hot: ");
      if (hot_method() != method()) {
        hot_method->print_short_name(tty);
      } else {
        tty->print("yes");
      }
    }
    tty->cr();
  }

  // A request has been made for compilation. Before we do any
  // real work, check to see if the method has been compiled
  // in the meantime with a definitive result.
  if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
    return;
  }

#ifndef PRODUCT
  if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) {
    if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) {
      // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI.
      return;
    }
  }
#endif

  // If this method is already in the compile queue, then
  // we do not block the current thread.
  if (compilation_is_in_queue(method)) {
    // We may want to decay our counter a bit here to prevent
    // multiple denied requests for compilation. This is an
    // open compilation policy issue. Note: The other possibility,
    // in the case that this is a blocking compile request, is to have
    // all subsequent blocking requesters wait for completion of
    // ongoing compiles. Note that in this case we'll need a protocol
    // for freeing the associated compile tasks. [Or we could have
    // a single static monitor on which all these waiters sleep.]
    return;
  }

  // Tiered policy requires MethodCounters to exist before adding a method to
  // the queue. Create if we don't have them yet.
  if (compile_reason != CompileTask::Reason_Preload) {
    method->get_method_counters(thread);
  }

  SCCEntry* scc_entry = find_scc_entry(method, osr_bci, comp_level, compile_reason, requires_online_compilation);
  bool is_scc = (scc_entry != nullptr);

  // Outputs from the following MutexLocker block:
  CompileTask* task = nullptr;
  CompileQueue* queue;
#if INCLUDE_JVMCI
  if (is_c2_compile(comp_level) && compiler2()->is_jvmci() && compiler3() != nullptr &&
      ((JVMCICompiler*)compiler2())->force_comp_at_level_simple(method)) {
    assert(_c3_compile_queue != nullptr, "sanity");
    queue = _c3_compile_queue; // JVMCI compiler's methods compilation
  } else
#endif
  queue = compile_queue(comp_level, is_scc);

  // Acquire our lock.
  {
    ConditionalMutexLocker locker(thread, queue->lock(), !UseLockFreeCompileQueues);

    // Make sure the method has not slipped into the queues since
    // last we checked; note that those checks were "fast bail-outs".
    // Here we need to be more careful, see 14012000 below.
    if (compilation_is_in_queue(method)) {
      return;
    }

    // We need to check again to see if the compilation has
    // completed. A previous compilation may have registered
    // some result.
    if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
      return;
    }

    // We now know that this compilation is not pending, complete,
    // or prohibited. Assign a compile_id to this compilation
    // and check to see if it is in our [Start..Stop) range.
    int compile_id = assign_compile_id(method, osr_bci);
    if (compile_id == 0) {
      // The compilation falls outside the allowed range.
      return;
    }

#if INCLUDE_JVMCI
    if (UseJVMCICompiler && blocking) {
      // Don't allow blocking compiles for requests triggered by JVMCI.
      if (thread->is_Compiler_thread()) {
        blocking = false;
      }

      // In libjvmci, JVMCI initialization should not deadlock with other threads
      if (!UseJVMCINativeLibrary) {
        // Don't allow blocking compiles if inside a class initializer or while performing class loading
        vframeStream vfst(JavaThread::cast(thread));
        for (; !vfst.at_end(); vfst.next()) {
          if (vfst.method()->is_static_initializer() ||
              (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) &&
               vfst.method()->name() == vmSymbols::loadClass_name())) {
            blocking = false;
            break;
          }
        }

        // Don't allow blocking compilation requests to JVMCI
        // if JVMCI itself is not yet initialized
        if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
          blocking = false;
        }
      }

      // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
      // to avoid deadlock between compiler thread(s) and threads run at shutdown
      // such as the DestroyJavaVM thread.
      if (JVMCI::in_shutdown()) {
        blocking = false;
      }
    }
#endif // INCLUDE_JVMCI

    // We will enter the compilation in the queue.
    // 14012000: Note that this sets the queued_for_compile bits in
    // the target method.
    // We can now reason that a method cannot be
    // queued for compilation more than once, as follows:
    // Before a thread queues a task for compilation, it first acquires
    // the compile queue lock, then checks if the method's queued bits
    // are set or it has already been compiled. Thus there can not be two
    // instances of a compilation task for the same method on the
    // compilation queue. Consider now the case where the compilation
    // thread has already removed a task for that method from the queue
    // and is in the midst of compiling it. In this case, the
    // queued_for_compile bits must be set in the method (and these
    // will be visible to the current thread, since the bits were set
    // under protection of the compile queue lock, which we hold now).
    // When the compilation completes, the compiler thread first sets
    // the compilation result and then clears the queued_for_compile
    // bits. Neither of these actions are protected by a barrier (or done
    // under the protection of a lock), so the only guarantee we have
    // (on machines with TSO (Total Store Order)) is that these values
    // will update in that order. As a result, the only combinations of
    // these bits that the current thread will see are, in temporal order:
    // <RESULT, QUEUE> :
    //     <0, 1> : in compile queue, but not yet compiled
    //     <1, 1> : compiled but queue bit not cleared
    //     <1, 0> : compiled and queue bit cleared
    // Because we first check the queue bits then check the result bits,
    // we are assured that we cannot introduce a duplicate task.
    // Note that if we did the tests in the reverse order (i.e. check
    // result then check queued bit), we could get the result bit before
    // the compilation completed, and the queue bit after the compilation
    // completed, and end up introducing a "duplicate" (redundant) task.
    // In that case, the compiler thread should first check if a method
    // has already been compiled before trying to compile it.
    // NOTE: in the event that there are multiple compiler threads and
    // there is de-optimization/recompilation, things will get hairy,
    // and in that case it's best to protect both the testing (here) of
    // these bits, and their updating (here and elsewhere) under a
    // common lock.
    task = create_compile_task(queue,
                               compile_id, method,
                               osr_bci, comp_level,
                               hot_method, hot_count, scc_entry, compile_reason,
                               requires_online_compilation, blocking);

    if (task->is_scc() && (_sc_count > 0)) {
      // Put it on SC queue
      queue = is_c1_compile(comp_level) ? _sc1_compile_queue : _sc2_compile_queue;
    }

    if (UseLockFreeCompileQueues) {
      assert(queue->lock()->owned_by_self() == false, "");
      queue->add_pending(task);
    } else {
      queue->add(task);
    }
  }

  if (blocking) {
    wait_for_completion(task);
  }
}

SCCEntry* CompileBroker::find_scc_entry(const methodHandle& method, int osr_bci, int comp_level,
                                        CompileTask::CompileReason compile_reason,
                                        bool requires_online_compilation) {
  SCCEntry* scc_entry = nullptr;
  if (osr_bci == InvocationEntryBci && !requires_online_compilation && SCCache::is_on_for_read()) {
    // Check for cached code.
1587 if (compile_reason == CompileTask::Reason_Preload) { 1588 scc_entry = method->scc_entry(); 1589 assert(scc_entry != nullptr && scc_entry->for_preload(), "sanity"); 1590 } else { 1591 scc_entry = SCCache::find_code_entry(method, comp_level); 1592 } 1593 } 1594 return scc_entry; 1595 } 1596 1597 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1598 int comp_level, 1599 const methodHandle& hot_method, int hot_count, 1600 bool requires_online_compilation, 1601 CompileTask::CompileReason compile_reason, 1602 TRAPS) { 1603 // Do nothing if compilebroker is not initialized or compiles are submitted on level none 1604 if (!_initialized || comp_level == CompLevel_none) { 1605 return nullptr; 1606 } 1607 1608 #if INCLUDE_JVMCI 1609 if (EnableJVMCI && UseJVMCICompiler && 1610 comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) { 1611 return nullptr; 1612 } 1613 #endif 1614 1615 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 1616 assert(comp != nullptr, "Ensure we have a compiler"); 1617 1618 #if INCLUDE_JVMCI 1619 if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) { 1620 // JVMCI compilation is not yet initializable. 1621 return nullptr; 1622 } 1623 #endif 1624 1625 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp); 1626 // CompileBroker::compile_method can trap and can have pending async exception. 1627 nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_method, hot_count, requires_online_compilation, compile_reason, directive, THREAD); 1628 DirectivesStack::release(directive); 1629 return nm; 1630 } 1631 1632 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1633 int comp_level, 1634 const methodHandle& hot_method, int hot_count, 1635 bool requires_online_compilation, 1636 CompileTask::CompileReason compile_reason, 1637 DirectiveSet* directive, 1638 TRAPS) { 1639 1640 // make sure arguments make sense 1641 assert(method->method_holder()->is_instance_klass(), "not an instance method"); 1642 assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); 1643 assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods"); 1644 assert(!method->method_holder()->is_not_initialized() || 1645 compile_reason == CompileTask::Reason_Preload || 1646 compile_reason == CompileTask::Reason_Precompile || 1647 compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized"); 1648 // return quickly if possible 1649 1650 if (PrecompileOnlyAndExit && !CompileTask::reason_is_precompiled(compile_reason)) { 1651 return nullptr; 1652 } 1653 1654 // lock, make sure that the compilation 1655 // isn't prohibited in a straightforward way. 
1656 AbstractCompiler* comp = CompileBroker::compiler(comp_level); 1657 if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) { 1658 return nullptr; 1659 } 1660 1661 if (osr_bci == InvocationEntryBci) { 1662 // standard compilation 1663 nmethod* method_code = method->code(); 1664 if (method_code != nullptr) { 1665 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) { 1666 return method_code; 1667 } 1668 } 1669 if (method->is_not_compilable(comp_level)) { 1670 return nullptr; 1671 } 1672 } else { 1673 // osr compilation 1674 // We accept a higher level osr method 1675 nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false); 1676 if (nm != nullptr) return nm; 1677 if (method->is_not_osr_compilable(comp_level)) return nullptr; 1678 } 1679 1680 assert(!HAS_PENDING_EXCEPTION, "No exception should be present"); 1681 // some prerequisites that are compiler specific 1682 if (compile_reason != CompileTask::Reason_Preload && (comp->is_c2() || comp->is_jvmci())) { 1683 InternalOOMEMark iom(THREAD); 1684 method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL); 1685 // Resolve all classes seen in the signature of the method 1686 // we are compiling. 1687 Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL); 1688 } 1689 1690 // If the method is native, do the lookup in the thread requesting 1691 // the compilation. Native lookups can load code, which is not 1692 // permitted during compilation. 1693 // 1694 // Note: A native method implies non-osr compilation which is 1695 // checked with an assertion at the entry of this method. 1696 if (method->is_native() && !method->is_method_handle_intrinsic()) { 1697 address adr = NativeLookup::lookup(method, THREAD); 1698 if (HAS_PENDING_EXCEPTION) { 1699 // In case of an exception looking up the method, we just forget 1700 // about it. The interpreter will kick-in and throw the exception. 1701 method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable() 1702 CLEAR_PENDING_EXCEPTION; 1703 return nullptr; 1704 } 1705 assert(method->has_native_function(), "must have native code by now"); 1706 } 1707 1708 // RedefineClasses() has replaced this method; just return 1709 if (method->is_old()) { 1710 return nullptr; 1711 } 1712 1713 // JVMTI -- post_compile_event requires jmethod_id() that may require 1714 // a lock the compiling thread can not acquire. Prefetch it here. 1715 if (JvmtiExport::should_post_compiled_method_load()) { 1716 method->jmethod_id(); 1717 } 1718 1719 // do the compilation 1720 if (method->is_native()) { 1721 if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) { 1722 #if defined(IA32) && !defined(ZERO) 1723 // The following native methods: 1724 // 1725 // java.lang.Float.intBitsToFloat 1726 // java.lang.Float.floatToRawIntBits 1727 // java.lang.Double.longBitsToDouble 1728 // java.lang.Double.doubleToRawLongBits 1729 // 1730 // are called through the interpreter even if interpreter native stubs 1731 // are not preferred (i.e., calling through adapter handlers is preferred). 1732 // The reason is that on x86_32 signaling NaNs (sNaNs) are not preserved 1733 // if the version of the methods from the native libraries is called. 1734 // As the interpreter and the C2-intrinsified version of the methods preserves 1735 // sNaNs, that would result in an inconsistent way of handling of sNaNs. 
1736 if ((UseSSE >= 1 && 1737 (method->intrinsic_id() == vmIntrinsics::_intBitsToFloat || 1738 method->intrinsic_id() == vmIntrinsics::_floatToRawIntBits)) || 1739 (UseSSE >= 2 && 1740 (method->intrinsic_id() == vmIntrinsics::_longBitsToDouble || 1741 method->intrinsic_id() == vmIntrinsics::_doubleToRawLongBits))) { 1742 return nullptr; 1743 } 1744 #endif // IA32 && !ZERO 1745 1746 // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that 1747 // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime). 1748 // 1749 // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter 1750 // in this case. If we can't generate one and use it we cannot execute the out-of-line method handle calls. 1751 AdapterHandlerLibrary::create_native_wrapper(method); 1752 } else { 1753 return nullptr; 1754 } 1755 } else { 1756 // If the compiler is shut off due to the code cache getting full, 1757 // fail out now so blocking compiles don't hang the Java thread. 1758 if (!should_compile_new_jobs()) { 1759 return nullptr; 1760 } 1761 bool is_blocking = ReplayCompiles || 1762 !directive->BackgroundCompilationOption || 1763 (compile_reason == CompileTask::Reason_Precompile) || 1764 (compile_reason == CompileTask::Reason_PrecompileForPreload); 1765 compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, requires_online_compilation, is_blocking, THREAD); 1766 } 1767 1768 // return requested nmethod 1769 // We accept a higher level osr method 1770 if (osr_bci == InvocationEntryBci) { 1771 return method->code(); 1772 } 1773 return method->lookup_osr_nmethod_for(osr_bci, comp_level, false); 1774 } 1775 1776 1777 // ------------------------------------------------------------------ 1778 // CompileBroker::compilation_is_complete 1779 // 1780 // See if compilation of this method is already complete. 1781 bool CompileBroker::compilation_is_complete(Method* method, 1782 int osr_bci, 1783 int comp_level, 1784 bool online_only, 1785 CompileTask::CompileReason compile_reason) { 1786 if (compile_reason == CompileTask::Reason_Precompile || 1787 compile_reason == CompileTask::Reason_PrecompileForPreload) { 1788 return false; // FIXME: any restrictions? 1789 } 1790 bool is_osr = (osr_bci != standard_entry_bci); 1791 if (is_osr) { 1792 if (method->is_not_osr_compilable(comp_level)) { 1793 return true; 1794 } else { 1795 nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true); 1796 return (result != nullptr); 1797 } 1798 } else { 1799 if (method->is_not_compilable(comp_level)) { 1800 return true; 1801 } else { 1802 nmethod* result = method->code(); 1803 if (result == nullptr) { 1804 return false; 1805 } 1806 if (online_only && result->is_scc()) { 1807 return false; 1808 } 1809 bool same_level = (comp_level == result->comp_level()); 1810 if (result->has_clinit_barriers()) { 1811 return !same_level; // Allow replacing preloaded code with new code of the same level 1812 } 1813 return same_level; 1814 } 1815 } 1816 } 1817 1818 1819 /** 1820 * See if this compilation is already requested. 1821 * 1822 * Implementation note: there is only a single "is in queue" bit 1823 * for each method. This means that the check below is overly 1824 * conservative in the sense that an osr compilation in the queue 1825 * will block a normal compilation from entering the queue (and vice 1826 * versa). This can be remedied by a full queue search to disambiguate 1827 * cases.
If it is deemed profitable, this may be done. 1828 */ 1829 bool CompileBroker::compilation_is_in_queue(const methodHandle& method) { 1830 return method->queued_for_compilation(); 1831 } 1832 1833 // ------------------------------------------------------------------ 1834 // CompileBroker::compilation_is_prohibited 1835 // 1836 // See if this compilation is not allowed. 1837 bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) { 1838 bool is_native = method->is_native(); 1839 // Some compilers may not support the compilation of natives. 1840 AbstractCompiler *comp = compiler(comp_level); 1841 if (is_native && (!CICompileNatives || comp == nullptr)) { 1842 method->set_not_compilable_quietly("native methods not supported", comp_level); 1843 return true; 1844 } 1845 1846 bool is_osr = (osr_bci != standard_entry_bci); 1847 // Some compilers may not support on stack replacement. 1848 if (is_osr && (!CICompileOSR || comp == nullptr)) { 1849 method->set_not_osr_compilable("OSR not supported", comp_level); 1850 return true; 1851 } 1852 1853 // The method may be explicitly excluded by the user. 1854 double scale; 1855 if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) { 1856 bool quietly = CompilerOracle::be_quiet(); 1857 if (PrintCompilation && !quietly) { 1858 // This does not happen quietly... 1859 ResourceMark rm; 1860 tty->print("### Excluding %s:%s", 1861 method->is_native() ? "generation of native wrapper" : "compile", 1862 (method->is_static() ? " static" : "")); 1863 method->print_short_name(tty); 1864 tty->cr(); 1865 } 1866 method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly); 1867 } 1868 1869 return false; 1870 } 1871 1872 /** 1873 * Generate serialized IDs for compilation requests. If certain debugging flags are used 1874 * and the ID is not within the specified range, the method is not compiled and 0 is returned. 1875 * The function also allows generating separate compilation IDs for OSR compilations. 1876 */ 1877 int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) { 1878 #ifdef ASSERT 1879 bool is_osr = (osr_bci != standard_entry_bci); 1880 int id; 1881 if (method->is_native()) { 1882 assert(!is_osr, "can't be osr"); 1883 // Adapters, native wrappers and method handle intrinsics 1884 // should always be generated. 1885 return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1); 1886 } else if (CICountOSR && is_osr) { 1887 id = Atomic::add(&_osr_compilation_id, 1); 1888 if (CIStartOSR <= id && id < CIStopOSR) { 1889 return id; 1890 } 1891 } else { 1892 id = Atomic::add(&_compilation_id, 1); 1893 if (CIStart <= id && id < CIStop) { 1894 return id; 1895 } 1896 } 1897 1898 // Method was not in the appropriate compilation range. 1899 method->set_not_compilable_quietly("Not in requested compile id range"); 1900 return 0; 1901 #else 1902 // CICountOSR is a develop flag and set to 'false' by default. In a product build, 1903 // only _compilation_id is incremented.
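// Standard, OSR and native wrapper requests therefore all draw their ids from this
// single counter in product builds.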
1904 return Atomic::add(&_compilation_id, 1); 1905 #endif 1906 } 1907 1908 // ------------------------------------------------------------------ 1909 // CompileBroker::assign_compile_id_unlocked 1910 // 1911 // Public wrapper for assign_compile_id that acquires the needed locks 1912 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) { 1913 return assign_compile_id(method, osr_bci); 1914 } 1915 1916 // ------------------------------------------------------------------ 1917 // CompileBroker::create_compile_task 1918 // 1919 // Create a CompileTask object representing the current request for 1920 // compilation. Add this task to the queue. 1921 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue, 1922 int compile_id, 1923 const methodHandle& method, 1924 int osr_bci, 1925 int comp_level, 1926 const methodHandle& hot_method, 1927 int hot_count, 1928 SCCEntry* scc_entry, 1929 CompileTask::CompileReason compile_reason, 1930 bool requires_online_compilation, 1931 bool blocking) { 1932 CompileTask* new_task = CompileTask::allocate(); 1933 new_task->initialize(compile_id, method, osr_bci, comp_level, 1934 hot_method, hot_count, scc_entry, compile_reason, queue, 1935 requires_online_compilation, blocking); 1936 return new_task; 1937 } 1938 1939 #if INCLUDE_JVMCI 1940 // The number of milliseconds to wait before checking if 1941 // JVMCI compilation has made progress. 1942 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000; 1943 1944 // The number of JVMCI compilation progress checks that must fail 1945 // before unblocking a thread waiting for a blocking compilation. 1946 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10; 1947 1948 /** 1949 * Waits for a JVMCI compiler to complete a given task. This thread 1950 * waits until either the task completes or it sees no JVMCI compilation 1951 * progress for N consecutive milliseconds where N is 1952 * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * 1953 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS. 1954 * 1955 * @return true if this thread needs to free/recycle the task 1956 */ 1957 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) { 1958 assert(UseJVMCICompiler, "sanity"); 1959 MonitorLocker ml(thread, task->lock()); 1960 int progress_wait_attempts = 0; 1961 jint thread_jvmci_compilation_ticks = 0; 1962 jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks(); 1963 while (!task->is_complete() && !is_compilation_disabled_forever() && 1964 ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) { 1965 JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state(); 1966 1967 bool progress; 1968 if (jvmci_compile_state != nullptr) { 1969 jint ticks = jvmci_compile_state->compilation_ticks(); 1970 progress = (ticks - thread_jvmci_compilation_ticks) != 0; 1971 JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks); 1972 thread_jvmci_compilation_ticks = ticks; 1973 } else { 1974 // Still waiting on JVMCI compiler queue. This thread may be holding a lock 1975 // that all JVMCI compiler threads are blocked on. We use the global JVMCI 1976 // compilation ticks to determine whether JVMCI compilation 1977 // is still making progress through the JVMCI compiler queue. 
1978 jint ticks = jvmci->global_compilation_ticks(); 1979 progress = (ticks - global_jvmci_compilation_ticks) != 0; 1980 JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks); 1981 global_jvmci_compilation_ticks = ticks; 1982 } 1983 1984 if (!progress) { 1985 if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) { 1986 if (PrintCompilation) { 1987 task->print(tty, "wait for blocking compilation timed out"); 1988 } 1989 JVMCI_event_1("waiting on compilation %d timed out", task->compile_id()); 1990 break; 1991 } 1992 } else { 1993 progress_wait_attempts = 0; 1994 } 1995 } 1996 task->clear_waiter(); 1997 return task->is_complete(); 1998 } 1999 #endif 2000 2001 /** 2002 * Wait for the compilation task to complete. 2003 */ 2004 void CompileBroker::wait_for_completion(CompileTask* task) { 2005 if (CIPrintCompileQueue) { 2006 ttyLocker ttyl; 2007 tty->print_cr("BLOCKING FOR COMPILE"); 2008 } 2009 2010 assert(task->is_blocking(), "can only wait on blocking task"); 2011 2012 JavaThread* thread = JavaThread::current(); 2013 2014 methodHandle method(thread, task->method()); 2015 bool free_task; 2016 #if INCLUDE_JVMCI 2017 AbstractCompiler* comp = compiler(task->comp_level()); 2018 if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) { 2019 // It may return before compilation is completed. 2020 // Note that libjvmci should not pre-emptively unblock 2021 // a thread waiting for a compilation as it does not call 2022 // Java code and so is not deadlock prone like jarjvmci. 2023 free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread); 2024 } else 2025 #endif 2026 { 2027 MonitorLocker ml(thread, task->lock()); 2028 free_task = true; 2029 task->inc_waiting_for_completion(); 2030 while (!task->is_complete() && !is_compilation_disabled_forever()) { 2031 ml.wait(); 2032 } 2033 task->dec_waiting_for_completion(); 2034 } 2035 2036 if (free_task) { 2037 if (is_compilation_disabled_forever()) { 2038 CompileTask::free(task); 2039 return; 2040 } 2041 2042 // It is harmless to check this status without the lock, because 2043 // completion is a stable property (until the task object is recycled). 2044 assert(task->is_complete(), "Compilation should have completed"); 2045 2046 // By convention, the waiter is responsible for recycling a 2047 // blocking CompileTask. Since there is only one waiter ever 2048 // waiting on a CompileTask, we know that no one else will 2049 // be using this CompileTask; we can free it. 2050 CompileTask::free(task); 2051 } 2052 } 2053 2054 /** 2055 * Initialize compiler thread(s) + compiler object(s). The postcondition 2056 * of this function is that the compiler runtimes are initialized and that 2057 * compiler threads can start compiling. 
2058 */ 2059 bool CompileBroker::init_compiler_runtime() { 2060 CompilerThread* thread = CompilerThread::current(); 2061 AbstractCompiler* comp = thread->compiler(); 2062 // Final sanity check - the compiler object must exist 2063 guarantee(comp != nullptr, "Compiler object must exist"); 2064 2065 { 2066 // Must switch to native to allocate ci_env 2067 ThreadToNativeFromVM ttn(thread); 2068 ciEnv ci_env((CompileTask*)nullptr); 2069 // Cache Jvmti state 2070 ci_env.cache_jvmti_state(); 2071 // Cache DTrace flags 2072 ci_env.cache_dtrace_flags(); 2073 2074 // Switch back to VM state to do compiler initialization 2075 ThreadInVMfromNative tv(thread); 2076 2077 comp->initialize(); 2078 } 2079 2080 if (comp->is_failed()) { 2081 disable_compilation_forever(); 2082 // If compiler initialization failed, no compiler thread that is specific to a 2083 // particular compiler runtime will ever start to compile methods. 2084 shutdown_compiler_runtime(comp, thread); 2085 return false; 2086 } 2087 2088 // C1 specific check 2089 if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) { 2090 warning("Initialization of %s thread failed (no space to run compilers)", thread->name()); 2091 return false; 2092 } 2093 2094 return true; 2095 } 2096 2097 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { 2098 BufferBlob* blob = thread->get_buffer_blob(); 2099 if (blob != nullptr) { 2100 blob->purge(); 2101 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2102 CodeCache::free(blob); 2103 } 2104 } 2105 2106 /** 2107 * If C1 and/or C2 initialization failed, we shut down all compilation. 2108 * We do this to keep things simple. This can be changed if it ever turns 2109 * out to be a problem. 2110 */ 2111 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { 2112 free_buffer_blob_if_allocated(thread); 2113 2114 log_info(compilation)("shutdown_compiler_runtime: " INTPTR_FORMAT, p2i(thread)); 2115 2116 if (comp->should_perform_shutdown()) { 2117 // There are two reasons for shutting down the compiler 2118 // 1) compiler runtime initialization failed 2119 // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing 2120 warning("%s initialization failed. Shutting down all compilers", comp->name()); 2121 2122 // Only one thread per compiler runtime object enters here 2123 // Set state to shut down 2124 comp->set_shut_down(); 2125 2126 // Delete all queued compilation tasks to make compiler threads exit faster. 2127 if (_c1_compile_queue != nullptr) { 2128 _c1_compile_queue->free_all(); 2129 } 2130 2131 if (_c2_compile_queue != nullptr) { 2132 _c2_compile_queue->free_all(); 2133 } 2134 2135 if (_c3_compile_queue != nullptr) { 2136 _c3_compile_queue->free_all(); 2137 } 2138 2139 // Set flags so that we continue execution with using interpreter only. 2140 UseCompiler = false; 2141 UseInterpreter = true; 2142 2143 // We could delete compiler runtimes also. However, there are references to 2144 // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then 2145 // fail. This can be done later if necessary. 2146 } 2147 } 2148 2149 /** 2150 * Helper function to create new or reuse old CompileLog. 2151 */ 2152 CompileLog* CompileBroker::get_log(CompilerThread* ct) { 2153 if (!LogCompilation) return nullptr; 2154 2155 AbstractCompiler *compiler = ct->compiler(); 2156 bool jvmci = JVMCI_ONLY( compiler->is_jvmci() ||) false; 2157 bool c1 = compiler->is_c1(); 2158 jobject* compiler_objects = c1 ? 
_compiler1_objects : (_c3_count == 0 ? _compiler2_objects : (jvmci ? _compiler2_objects : _compiler3_objects)); 2159 assert(compiler_objects != nullptr, "must be initialized at this point"); 2160 CompileLog** logs = c1 ? _compiler1_logs : (_c3_count == 0 ? _compiler2_logs : (jvmci ? _compiler2_logs : _compiler3_logs)); 2161 assert(logs != nullptr, "must be initialized at this point"); 2162 int count = c1 ? _c1_count : (_c3_count == 0 ? _c2_count : (jvmci ? _c2_count : _c3_count)); 2163 2164 if (ct->queue() == _sc1_compile_queue || ct->queue() == _sc2_compile_queue) { 2165 compiler_objects = _sc_objects; 2166 logs = _sc_logs; 2167 count = _sc_count; 2168 } 2169 // Find Compiler number by its threadObj. 2170 oop compiler_obj = ct->threadObj(); 2171 int compiler_number = 0; 2172 bool found = false; 2173 for (; compiler_number < count; compiler_number++) { 2174 if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) { 2175 found = true; 2176 break; 2177 } 2178 } 2179 assert(found, "Compiler must exist at this point"); 2180 2181 // Determine pointer for this thread's log. 2182 CompileLog** log_ptr = &logs[compiler_number]; 2183 2184 // Return old one if it exists. 2185 CompileLog* log = *log_ptr; 2186 if (log != nullptr) { 2187 ct->init_log(log); 2188 return log; 2189 } 2190 2191 // Create a new one and remember it. 2192 init_compiler_thread_log(); 2193 log = ct->log(); 2194 *log_ptr = log; 2195 return log; 2196 } 2197 2198 // ------------------------------------------------------------------ 2199 // CompileBroker::compiler_thread_loop 2200 // 2201 // The main loop run by a CompilerThread. 2202 void CompileBroker::compiler_thread_loop() { 2203 CompilerThread* thread = CompilerThread::current(); 2204 CompileQueue* queue = thread->queue(); 2205 // For the thread that initializes the ciObjectFactory 2206 // this resource mark holds all the shared objects 2207 ResourceMark rm; 2208 2209 // First thread to get here will initialize the compiler interface 2210 2211 { 2212 ASSERT_IN_VM; 2213 MutexLocker only_one (thread, CompileThread_lock); 2214 if (!ciObjectFactory::is_initialized()) { 2215 ciObjectFactory::initialize(); 2216 } 2217 } 2218 2219 // Open a log. 2220 CompileLog* log = get_log(thread); 2221 if (log != nullptr) { 2222 log->begin_elem("start_compile_thread name='%s' thread='%zu' process='%d'", 2223 thread->name(), 2224 os::current_thread_id(), 2225 os::current_process_id()); 2226 log->stamp(); 2227 log->end_elem(); 2228 } 2229 2230 // If compiler thread/runtime initialization fails, exit the compiler thread 2231 if (!init_compiler_runtime()) { 2232 return; 2233 } 2234 2235 thread->start_idle_timer(); 2236 2237 // Poll for new compilation tasks as long as the JVM runs. Compilation 2238 // should only be disabled if something went wrong while initializing the 2239 // compiler runtimes. This, in turn, should not happen. The only known case 2240 // when compiler runtime initialization fails is if there is not enough free 2241 // space in the code cache to generate the necessary stubs, etc. 2242 while (!is_compilation_disabled_forever()) { 2243 // We need this HandleMark to avoid leaking VM handles. 2244 HandleMark hm(thread); 2245 2246 RecompilationPolicy::recompilation_step(RecompilationWorkUnitSize, thread); 2247 2248 CompileTask* task = queue->get(thread); 2249 2250 if (task == nullptr) { 2251 if (UseDynamicNumberOfCompilerThreads) { 2252 // Access compiler_count under lock to enforce consistency. 
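// can_remove() then decides, under the same lock, whether this idle thread may exit.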
2253 MutexLocker only_one(CompileThread_lock); 2254 if (can_remove(thread, true)) { 2255 if (trace_compiler_threads()) { 2256 ResourceMark rm; 2257 stringStream msg; 2258 msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time", 2259 thread->name(), thread->idle_time_millis()); 2260 print_compiler_threads(msg); 2261 } 2262 2263 // Notify compiler that the compiler thread is about to stop 2264 thread->compiler()->stopping_compiler_thread(thread); 2265 2266 free_buffer_blob_if_allocated(thread); 2267 return; // Stop this thread. 2268 } 2269 } 2270 } else { 2271 // Assign the task to the current thread. Mark this compilation 2272 // thread as active for the profiler. 2273 // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition 2274 // occurs after fetching the compile task off the queue. 2275 CompileTaskWrapper ctw(task); 2276 methodHandle method(thread, task->method()); 2277 2278 // Never compile a method if breakpoints are present in it 2279 if (method()->number_of_breakpoints() == 0) { 2280 // Compile the method. 2281 if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { 2282 invoke_compiler_on_method(task); 2283 thread->start_idle_timer(); 2284 } else { 2285 // After compilation is disabled, remove remaining methods from queue 2286 method->clear_queued_for_compilation(); 2287 method->set_pending_queue_processed(false); 2288 task->set_failure_reason("compilation is disabled"); 2289 } 2290 } else { 2291 task->set_failure_reason("breakpoints are present"); 2292 } 2293 2294 if (UseDynamicNumberOfCompilerThreads) { 2295 possibly_add_compiler_threads(thread); 2296 assert(!thread->has_pending_exception(), "should have been handled"); 2297 } 2298 } 2299 } 2300 2301 // Shut down compiler runtime 2302 shutdown_compiler_runtime(thread->compiler(), thread); 2303 } 2304 2305 // ------------------------------------------------------------------ 2306 // CompileBroker::init_compiler_thread_log 2307 // 2308 // Set up state required by +LogCompilation. 2309 void CompileBroker::init_compiler_thread_log() { 2310 CompilerThread* thread = CompilerThread::current(); 2311 char file_name[4*K]; 2312 FILE* fp = nullptr; 2313 intx thread_id = os::current_thread_id(); 2314 for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) { 2315 const char* dir = (try_temp_dir ? 
os::get_temp_directory() : nullptr); 2316 if (dir == nullptr) { 2317 jio_snprintf(file_name, sizeof(file_name), "hs_c%zu_pid%u.log", 2318 thread_id, os::current_process_id()); 2319 } else { 2320 jio_snprintf(file_name, sizeof(file_name), 2321 "%s%shs_c%zu_pid%u.log", dir, 2322 os::file_separator(), thread_id, os::current_process_id()); 2323 } 2324 2325 fp = os::fopen(file_name, "wt"); 2326 if (fp != nullptr) { 2327 if (LogCompilation && Verbose) { 2328 tty->print_cr("Opening compilation log %s", file_name); 2329 } 2330 CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id); 2331 if (log == nullptr) { 2332 fclose(fp); 2333 return; 2334 } 2335 thread->init_log(log); 2336 2337 if (xtty != nullptr) { 2338 ttyLocker ttyl; 2339 // Record any per thread log files 2340 xtty->elem("thread_logfile thread='%zd' filename='%s'", thread_id, file_name); 2341 } 2342 return; 2343 } 2344 } 2345 warning("Cannot open log file: %s", file_name); 2346 } 2347 2348 void CompileBroker::log_metaspace_failure() { 2349 const char* message = "some methods may not be compiled because metaspace " 2350 "is out of memory"; 2351 if (CompilationLog::log() != nullptr) { 2352 CompilationLog::log()->log_metaspace_failure(message); 2353 } 2354 if (PrintCompilation) { 2355 tty->print_cr("COMPILE PROFILING SKIPPED: %s", message); 2356 } 2357 } 2358 2359 2360 // ------------------------------------------------------------------ 2361 // CompileBroker::set_should_block 2362 // 2363 // Set _should_block. 2364 // Call this from the VM, with Threads_lock held and a safepoint requested. 2365 void CompileBroker::set_should_block() { 2366 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); 2367 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already"); 2368 #ifndef PRODUCT 2369 if (PrintCompilation && (Verbose || WizardMode)) 2370 tty->print_cr("notifying compiler thread pool to block"); 2371 #endif 2372 _should_block = true; 2373 } 2374 2375 // ------------------------------------------------------------------ 2376 // CompileBroker::maybe_block 2377 // 2378 // Call this from the compiler at convenient points, to poll for _should_block. 
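// A minimal usage sketch (illustrative only; the phase loop and its helpers are hypothetical):
//   while (phase_has_more_work()) {
//     do_some_phase_work();
//     CompileBroker::maybe_block();  // honor a pending block request promptly
//   }
// The ThreadInVMfromNative transition in the body below is what actually blocks:
// _should_block is only set with a safepoint requested (see set_should_block above),
// so entering the VM state parks this compiler thread until that safepoint operation completes.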
2379 void CompileBroker::maybe_block() { 2380 if (_should_block) { 2381 #ifndef PRODUCT 2382 if (PrintCompilation && (Verbose || WizardMode)) 2383 tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current())); 2384 #endif 2385 ThreadInVMfromNative tivfn(JavaThread::current()); 2386 } 2387 } 2388 2389 // wrapper for CodeCache::print_summary() 2390 static void codecache_print(bool detailed) 2391 { 2392 stringStream s; 2393 // Dump code cache into a buffer before locking the tty, 2394 { 2395 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2396 CodeCache::print_summary(&s, detailed); 2397 } 2398 ttyLocker ttyl; 2399 tty->print("%s", s.freeze()); 2400 } 2401 2402 // wrapper for CodeCache::print_summary() using outputStream 2403 static void codecache_print(outputStream* out, bool detailed) { 2404 stringStream s; 2405 2406 // Dump code cache into a buffer 2407 { 2408 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2409 CodeCache::print_summary(&s, detailed); 2410 } 2411 2412 char* remaining_log = s.as_string(); 2413 while (*remaining_log != '\0') { 2414 char* eol = strchr(remaining_log, '\n'); 2415 if (eol == nullptr) { 2416 out->print_cr("%s", remaining_log); 2417 remaining_log = remaining_log + strlen(remaining_log); 2418 } else { 2419 *eol = '\0'; 2420 out->print_cr("%s", remaining_log); 2421 remaining_log = eol + 1; 2422 } 2423 } 2424 } 2425 2426 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, 2427 int compilable, const char* failure_reason) { 2428 if (!AbortVMOnCompilationFailure) { 2429 return; 2430 } 2431 if (compilable == ciEnv::MethodCompilable_not_at_tier) { 2432 fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); 2433 } 2434 if (compilable == ciEnv::MethodCompilable_never) { 2435 fatal("Never compilable: %s", failure_reason); 2436 } 2437 } 2438 2439 static void post_compilation_event(EventCompilation& event, CompileTask* task) { 2440 assert(task != nullptr, "invariant"); 2441 CompilerEvent::CompilationEvent::post(event, 2442 task->compile_id(), 2443 task->compiler()->type(), 2444 task->method(), 2445 task->comp_level(), 2446 task->is_success(), 2447 task->osr_bci() != CompileBroker::standard_entry_bci, 2448 task->nm_total_size(), 2449 task->num_inlined_bytecodes(), 2450 task->arena_bytes()); 2451 } 2452 2453 int DirectivesStack::_depth = 0; 2454 CompilerDirectives* DirectivesStack::_top = nullptr; 2455 CompilerDirectives* DirectivesStack::_bottom = nullptr; 2456 2457 // Acquires Compilation_lock and waits for it to be notified 2458 // as long as WhiteBox::compilation_locked is true. 2459 static void whitebox_lock_compilation() { 2460 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag); 2461 while (WhiteBox::compilation_locked) { 2462 locker.wait(); 2463 } 2464 } 2465 2466 // ------------------------------------------------------------------ 2467 // CompileBroker::invoke_compiler_on_method 2468 // 2469 // Compile a method. 2470 // 2471 void CompileBroker::invoke_compiler_on_method(CompileTask* task) { 2472 task->print_ul(); 2473 elapsedTimer time; 2474 2475 DirectiveSet* directive = task->directive(); 2476 2477 CompilerThread* thread = CompilerThread::current(); 2478 ResourceMark rm(thread); 2479 2480 if (CompilationLog::log() != nullptr) { 2481 CompilationLog::log()->log_compile(thread, task); 2482 } 2483 2484 // Common flags. 
2485 int compile_id = task->compile_id(); 2486 int osr_bci = task->osr_bci(); 2487 bool is_osr = (osr_bci != standard_entry_bci); 2488 bool should_log = (thread->log() != nullptr); 2489 bool should_break = false; 2490 bool should_print_compilation = PrintCompilation || directive->PrintCompilationOption; 2491 const int task_level = task->comp_level(); 2492 AbstractCompiler* comp = task->compiler(); 2493 { 2494 // create the handle inside it's own block so it can't 2495 // accidentally be referenced once the thread transitions to 2496 // native. The NoHandleMark before the transition should catch 2497 // any cases where this occurs in the future. 2498 methodHandle method(thread, task->method()); 2499 2500 assert(!method->is_native(), "no longer compile natives"); 2501 2502 // Update compile information when using perfdata. 2503 if (UsePerfData) { 2504 update_compile_perf_data(thread, method, is_osr); 2505 } 2506 2507 DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); 2508 } 2509 2510 should_break = directive->BreakAtCompileOption || task->check_break_at_flags(); 2511 if (should_log && !directive->LogOption) { 2512 should_log = false; 2513 } 2514 2515 // Allocate a new set of JNI handles. 2516 JNIHandleMark jhm(thread); 2517 Method* target_handle = task->method(); 2518 int compilable = ciEnv::MethodCompilable; 2519 const char* failure_reason = nullptr; 2520 bool failure_reason_on_C_heap = false; 2521 const char* retry_message = nullptr; 2522 2523 #if INCLUDE_JVMCI 2524 if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) { 2525 JVMCICompiler* jvmci = (JVMCICompiler*) comp; 2526 2527 TraceTime t1("compilation", &time); 2528 EventCompilation event; 2529 JVMCICompileState compile_state(task, jvmci); 2530 JVMCIRuntime *runtime = nullptr; 2531 2532 if (JVMCI::in_shutdown()) { 2533 failure_reason = "in JVMCI shutdown"; 2534 retry_message = "not retryable"; 2535 compilable = ciEnv::MethodCompilable_never; 2536 } else if (compile_state.target_method_is_old()) { 2537 // Skip redefined methods 2538 failure_reason = "redefined method"; 2539 retry_message = "not retryable"; 2540 compilable = ciEnv::MethodCompilable_never; 2541 } else { 2542 JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__); 2543 if (env.init_error() != JNI_OK) { 2544 const char* msg = env.init_error_msg(); 2545 failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)", 2546 env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI); 2547 bool reason_on_C_heap = true; 2548 // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it 2549 // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime. 
2550 bool retryable = env.init_error() == JNI_ENOMEM; 2551 compile_state.set_failure(retryable, failure_reason, reason_on_C_heap); 2552 } 2553 if (failure_reason == nullptr) { 2554 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2555 // Must switch to native to block 2556 ThreadToNativeFromVM ttn(thread); 2557 whitebox_lock_compilation(); 2558 } 2559 methodHandle method(thread, target_handle); 2560 runtime = env.runtime(); 2561 runtime->compile_method(&env, jvmci, method, osr_bci); 2562 2563 failure_reason = compile_state.failure_reason(); 2564 failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); 2565 if (!compile_state.retryable()) { 2566 retry_message = "not retryable"; 2567 compilable = ciEnv::MethodCompilable_not_at_tier; 2568 } 2569 if (!task->is_success()) { 2570 assert(failure_reason != nullptr, "must specify failure_reason"); 2571 } 2572 } 2573 } 2574 if (!task->is_success() && !JVMCI::in_shutdown()) { 2575 handle_compile_error(thread, task, nullptr, compilable, failure_reason); 2576 } 2577 if (event.should_commit()) { 2578 post_compilation_event(event, task); 2579 } 2580 2581 if (runtime != nullptr) { 2582 runtime->post_compile(thread); 2583 } 2584 } else 2585 #endif // INCLUDE_JVMCI 2586 { 2587 NoHandleMark nhm; 2588 ThreadToNativeFromVM ttn(thread); 2589 2590 ciEnv ci_env(task); 2591 if (should_break) { 2592 ci_env.set_break_at_compile(true); 2593 } 2594 if (should_log) { 2595 ci_env.set_log(thread->log()); 2596 } 2597 assert(thread->env() == &ci_env, "set by ci_env"); 2598 // The thread-env() field is cleared in ~CompileTaskWrapper. 2599 2600 // Cache Jvmti state 2601 bool method_is_old = ci_env.cache_jvmti_state(); 2602 2603 // Skip redefined methods 2604 if (method_is_old) { 2605 ci_env.record_method_not_compilable("redefined method", true); 2606 } 2607 2608 // Cache DTrace flags 2609 ci_env.cache_dtrace_flags(); 2610 2611 ciMethod* target = ci_env.get_method_from_handle(target_handle); 2612 2613 TraceTime t1("compilation", &time); 2614 EventCompilation event; 2615 2616 bool install_code = true; 2617 if (comp == nullptr) { 2618 ci_env.record_method_not_compilable("no compiler"); 2619 } else if (!ci_env.failing()) { 2620 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2621 whitebox_lock_compilation(); 2622 } 2623 if (StoreCachedCode && task->is_precompiled()) { 2624 install_code = false; // not suitable in the current context 2625 } 2626 comp->compile_method(&ci_env, target, osr_bci, install_code, directive); 2627 2628 /* Repeat compilation without installing code for profiling purposes */ 2629 int repeat_compilation_count = directive->RepeatCompilationOption; 2630 while (repeat_compilation_count > 0) { 2631 ResourceMark rm(thread); 2632 task->print_ul("NO CODE INSTALLED"); 2633 comp->compile_method(&ci_env, target, osr_bci, false, directive); 2634 repeat_compilation_count--; 2635 } 2636 } 2637 2638 DirectivesStack::release(directive); 2639 2640 if (!ci_env.failing() && !task->is_success() && install_code) { 2641 assert(ci_env.failure_reason() != nullptr, "expect failure reason"); 2642 assert(false, "compiler should always document failure: %s", ci_env.failure_reason()); 2643 // The compiler elected, without comment, not to register a result. 2644 // Do not attempt further compilations of this method. 
2645 ci_env.record_method_not_compilable("compile failed"); 2646 } 2647 2648 // Copy this bit to the enclosing block: 2649 compilable = ci_env.compilable(); 2650 2651 if (ci_env.failing()) { 2652 // Duplicate the failure reason string, so that it outlives ciEnv 2653 failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler); 2654 failure_reason_on_C_heap = true; 2655 retry_message = ci_env.retry_message(); 2656 ci_env.report_failure(failure_reason); 2657 } 2658 2659 if (ci_env.failing()) { 2660 handle_compile_error(thread, task, &ci_env, compilable, failure_reason); 2661 } 2662 if (event.should_commit()) { 2663 post_compilation_event(event, task); 2664 } 2665 } 2666 2667 if (failure_reason != nullptr) { 2668 task->set_failure_reason(failure_reason, failure_reason_on_C_heap); 2669 if (CompilationLog::log() != nullptr) { 2670 CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message); 2671 } 2672 if (PrintCompilation) { 2673 FormatBufferResource msg = retry_message != nullptr ? 2674 FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) : 2675 FormatBufferResource("COMPILE SKIPPED: %s", failure_reason); 2676 task->print(tty, msg); 2677 } 2678 } 2679 2680 task->mark_finished(os::elapsed_counter()); 2681 2682 methodHandle method(thread, task->method()); 2683 2684 DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); 2685 2686 collect_statistics(thread, time, task); 2687 2688 if (PrintCompilation && PrintCompilation2) { 2689 tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp 2690 tty->print("%4d ", compile_id); // print compilation number 2691 tty->print("%s ", (is_osr ? "%" : (task->is_scc() ? "A" : " "))); 2692 if (task->is_success()) { 2693 tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size()); 2694 } 2695 tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); 2696 } 2697 2698 Log(compilation, codecache) log; 2699 if (log.is_debug()) { 2700 LogStream ls(log.debug()); 2701 codecache_print(&ls, /* detailed= */ false); 2702 } 2703 if (PrintCodeCacheOnCompilation) { 2704 codecache_print(/* detailed= */ false); 2705 } 2706 // Disable compilation, if required. 2707 switch (compilable) { 2708 case ciEnv::MethodCompilable_never: 2709 if (is_osr) 2710 method->set_not_osr_compilable_quietly("MethodCompilable_never"); 2711 else 2712 method->set_not_compilable_quietly("MethodCompilable_never"); 2713 break; 2714 case ciEnv::MethodCompilable_not_at_tier: 2715 if (is_osr) 2716 method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2717 else 2718 method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2719 break; 2720 } 2721 2722 // Note that the queued_for_compilation bits are cleared without 2723 // protection of a mutex. [They were set by the requester thread, 2724 // when adding the task to the compile queue -- at which time the 2725 // compile queue lock was held. Subsequently, we acquired the compile 2726 // queue lock to get this task off the compile queue; thus (to belabour 2727 // the point somewhat) our clearing of the bits must be occurring 2728 // only after the setting of the bits. See also 14012000 above. 2729 method->clear_queued_for_compilation(); 2730 method->set_pending_queue_processed(false); 2731 2732 if (should_print_compilation) { 2733 ResourceMark rm; 2734 task->print_tty(); 2735 } 2736 } 2737 2738 /** 2739 * The CodeCache is full. 
Print warning and disable compilation. 2740 * Schedule code cache cleaning so compilation can continue later. 2741 * This function needs to be called only from CodeCache::allocate(), 2742 * since we currently handle a full code cache uniformly. 2743 */ 2744 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) { 2745 UseInterpreter = true; 2746 if (UseCompiler || AlwaysCompileLoopMethods ) { 2747 if (xtty != nullptr) { 2748 stringStream s; 2749 // Dump code cache state into a buffer before locking the tty, 2750 // because log_state() will use locks causing lock conflicts. 2751 CodeCache::log_state(&s); 2752 // Lock to prevent tearing 2753 ttyLocker ttyl; 2754 xtty->begin_elem("code_cache_full"); 2755 xtty->print("%s", s.freeze()); 2756 xtty->stamp(); 2757 xtty->end_elem(); 2758 } 2759 2760 #ifndef PRODUCT 2761 if (ExitOnFullCodeCache) { 2762 codecache_print(/* detailed= */ true); 2763 before_exit(JavaThread::current()); 2764 exit_globals(); // will delete tty 2765 vm_direct_exit(1); 2766 } 2767 #endif 2768 if (UseCodeCacheFlushing) { 2769 // Since code cache is full, immediately stop new compiles 2770 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { 2771 log_info(codecache)("Code cache is full - disabling compilation"); 2772 } 2773 } else { 2774 disable_compilation_forever(); 2775 } 2776 2777 CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning()); 2778 } 2779 } 2780 2781 // ------------------------------------------------------------------ 2782 // CompileBroker::update_compile_perf_data 2783 // 2784 // Record this compilation for debugging purposes. 2785 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) { 2786 ResourceMark rm; 2787 char* method_name = method->name()->as_C_string(); 2788 char current_method[CompilerCounters::cmname_buffer_length]; 2789 size_t maxLen = CompilerCounters::cmname_buffer_length; 2790 2791 const char* class_name = method->method_holder()->name()->as_C_string(); 2792 2793 size_t s1len = strlen(class_name); 2794 size_t s2len = strlen(method_name); 2795 2796 // check if we need to truncate the string 2797 if (s1len + s2len + 2 > maxLen) { 2798 2799 // the strategy is to lop off the leading characters of the 2800 // class name and the trailing characters of the method name. 2801 2802 if (s2len + 2 > maxLen) { 2803 // lop of the entire class name string, let snprintf handle 2804 // truncation of the method name. 2805 class_name += s1len; // null string 2806 } 2807 else { 2808 // lop off the extra characters from the front of the class name 2809 class_name += ((s1len + s2len + 2) - maxLen); 2810 } 2811 } 2812 2813 jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name); 2814 2815 int last_compile_type = normal_compile; 2816 if (CICountOSR && is_osr) { 2817 last_compile_type = osr_compile; 2818 } else if (CICountNative && method->is_native()) { 2819 last_compile_type = native_compile; 2820 } 2821 2822 CompilerCounters* counters = thread->counters(); 2823 counters->set_current_method(current_method); 2824 counters->set_compile_type((jlong) last_compile_type); 2825 } 2826 2827 // ------------------------------------------------------------------ 2828 // CompileBroker::collect_statistics 2829 // 2830 // Collect statistics about the compilation. 
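// Runs under CompileStatistics_lock; the per-level and per-compiler CompilerStatistics
// below are only maintained when CITime or Info-level 'init' logging is enabled.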
2831 2832 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) { 2833 bool success = task->is_success(); 2834 methodHandle method (thread, task->method()); 2835 int compile_id = task->compile_id(); 2836 bool is_osr = (task->osr_bci() != standard_entry_bci); 2837 const int comp_level = task->comp_level(); 2838 CompilerCounters* counters = thread->counters(); 2839 2840 MutexLocker locker(CompileStatistics_lock); 2841 2842 // _perf variables are production performance counters which are 2843 // updated regardless of the setting of the CITime and CITimeEach flags 2844 // 2845 2846 // account all time, including bailouts and failures in this counter; 2847 // C1 and C2 counters are counting both successful and unsuccessful compiles 2848 _t_total_compilation.add(&time); 2849 2850 // Update compilation times. Used by the implementation of JFR CompilerStatistics 2851 // and java.lang.management.CompilationMXBean. 2852 _perf_total_compilation->inc(time.ticks()); 2853 _peak_compilation_time = MAX2(time.milliseconds(), _peak_compilation_time); 2854 2855 if (!success) { 2856 _total_bailout_count++; 2857 if (UsePerfData) { 2858 _perf_last_failed_method->set_value(counters->current_method()); 2859 _perf_last_failed_type->set_value(counters->compile_type()); 2860 _perf_total_bailout_count->inc(); 2861 } 2862 _t_bailedout_compilation.add(&time); 2863 2864 if (CITime || log_is_enabled(Info, init)) { 2865 CompilerStatistics* stats = nullptr; 2866 if (task->is_scc()) { 2867 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2868 stats = &_scc_stats_per_level[level]; 2869 } else { 2870 stats = &_stats_per_level[comp_level-1]; 2871 } 2872 stats->_bailout.update(time, 0); 2873 } 2874 } else if (!task->is_success()) { 2875 if (UsePerfData) { 2876 _perf_last_invalidated_method->set_value(counters->current_method()); 2877 _perf_last_invalidated_type->set_value(counters->compile_type()); 2878 _perf_total_invalidated_count->inc(); 2879 } 2880 _total_invalidated_count++; 2881 _t_invalidated_compilation.add(&time); 2882 2883 if (CITime || log_is_enabled(Info, init)) { 2884 CompilerStatistics* stats = nullptr; 2885 if (task->is_scc()) { 2886 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2887 stats = &_scc_stats_per_level[level]; 2888 } else { 2889 stats = &_stats_per_level[comp_level-1]; 2890 } 2891 stats->_invalidated.update(time, 0); 2892 } 2893 } else { 2894 // Compilation succeeded 2895 if (CITime || log_is_enabled(Info, init)) { 2896 int bytes_compiled = method->code_size() + task->num_inlined_bytecodes(); 2897 if (is_osr) { 2898 _t_osr_compilation.add(&time); 2899 _sum_osr_bytes_compiled += bytes_compiled; 2900 } else { 2901 _t_standard_compilation.add(&time); 2902 _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); 2903 } 2904 2905 // Collect statistic per compilation level 2906 if (task->is_scc()) { 2907 _scc_stats._standard.update(time, bytes_compiled); 2908 _scc_stats._nmethods_size += task->nm_total_size(); 2909 _scc_stats._nmethods_code_size += task->nm_insts_size(); 2910 int level = task->preload() ? 
CompLevel_full_optimization : (comp_level - 1); 2911 CompilerStatistics* stats = &_scc_stats_per_level[level]; 2912 stats->_standard.update(time, bytes_compiled); 2913 stats->_nmethods_size += task->nm_total_size(); 2914 stats->_nmethods_code_size += task->nm_insts_size(); 2915 } else if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) { 2916 CompilerStatistics* stats = &_stats_per_level[comp_level-1]; 2917 if (is_osr) { 2918 stats->_osr.update(time, bytes_compiled); 2919 } else { 2920 stats->_standard.update(time, bytes_compiled); 2921 } 2922 stats->_nmethods_size += task->nm_total_size(); 2923 stats->_nmethods_code_size += task->nm_insts_size(); 2924 } else { 2925 assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); 2926 } 2927 2928 // Collect statistic per compiler 2929 AbstractCompiler* comp = task->compiler(); 2930 if (comp && !task->is_scc()) { 2931 CompilerStatistics* stats = comp->stats(); 2932 if (is_osr) { 2933 stats->_osr.update(time, bytes_compiled); 2934 } else { 2935 stats->_standard.update(time, bytes_compiled); 2936 } 2937 stats->_nmethods_size += task->nm_total_size(); 2938 stats->_nmethods_code_size += task->nm_insts_size(); 2939 } else if (!task->is_scc()) { // if (!comp) 2940 assert(false, "Compiler object must exist"); 2941 } 2942 } 2943 2944 if (UsePerfData) { 2945 // save the name of the last method compiled 2946 _perf_last_method->set_value(counters->current_method()); 2947 _perf_last_compile_type->set_value(counters->compile_type()); 2948 _perf_last_compile_size->set_value(method->code_size() + 2949 task->num_inlined_bytecodes()); 2950 if (is_osr) { 2951 _perf_osr_compilation->inc(time.ticks()); 2952 _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2953 } else { 2954 _perf_standard_compilation->inc(time.ticks()); 2955 _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2956 } 2957 } 2958 2959 if (CITimeEach) { 2960 double compile_time = time.seconds(); 2961 double bytes_per_sec = compile_time == 0.0 ? 0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time; 2962 tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)", 2963 compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); 2964 } 2965 2966 // Collect counts of successful compilations 2967 _sum_nmethod_size += task->nm_total_size(); 2968 _sum_nmethod_code_size += task->nm_insts_size(); 2969 _total_compile_count++; 2970 2971 if (UsePerfData) { 2972 _perf_sum_nmethod_size->inc( task->nm_total_size()); 2973 _perf_sum_nmethod_code_size->inc(task->nm_insts_size()); 2974 _perf_total_compile_count->inc(); 2975 } 2976 2977 if (is_osr) { 2978 if (UsePerfData) _perf_total_osr_compile_count->inc(); 2979 _total_osr_compile_count++; 2980 } else { 2981 if (UsePerfData) _perf_total_standard_compile_count->inc(); 2982 _total_standard_compile_count++; 2983 } 2984 } 2985 // set the current method for the thread to null 2986 if (UsePerfData) counters->set_current_method(""); 2987 } 2988 2989 const char* CompileBroker::compiler_name(int comp_level) { 2990 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 2991 if (comp == nullptr) { 2992 return "no compiler"; 2993 } else { 2994 return (comp->name()); 2995 } 2996 } 2997 2998 jlong CompileBroker::total_compilation_ticks() { 2999 return _perf_total_compilation != nullptr ? 
_perf_total_compilation->get_value() : 0; 3000 } 3001 3002 void CompileBroker::log_not_entrant(nmethod* nm) { 3003 _total_not_entrant_count++; 3004 if (CITime || log_is_enabled(Info, init)) { 3005 CompilerStatistics* stats = nullptr; 3006 int level = nm->comp_level(); 3007 if (nm->is_scc()) { 3008 if (nm->preloaded()) { 3009 assert(level == CompLevel_full_optimization, "%d", level); 3010 level = CompLevel_full_optimization + 1; 3011 } 3012 stats = &_scc_stats_per_level[level - 1]; 3013 } else { 3014 stats = &_stats_per_level[level - 1]; 3015 } 3016 stats->_made_not_entrant._count++; 3017 } 3018 } 3019 3020 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) { 3021 tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}", 3022 name, stats->bytes_per_second(), 3023 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, 3024 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, 3025 stats->_nmethods_size, stats->_nmethods_code_size); 3026 } 3027 3028 static void print_helper(outputStream* st, const char* name, CompilerStatistics::Data data, bool print_time = true) { 3029 if (data._count > 0) { 3030 st->print("; %s: %4u methods", name, data._count); 3031 if (print_time) { 3032 st->print(" (in %.3fs)", data._time.seconds()); 3033 } 3034 } 3035 } 3036 3037 static void print_tier_helper(outputStream* st, const char* prefix, int tier, CompilerStatistics* stats) { 3038 st->print(" %s%d: %5u methods", prefix, tier, stats->_standard._count); 3039 if (stats->_standard._count > 0) { 3040 st->print(" (in %.3fs)", stats->_standard._time.seconds()); 3041 } 3042 print_helper(st, "osr", stats->_osr); 3043 print_helper(st, "bailout", stats->_bailout); 3044 print_helper(st, "invalid", stats->_invalidated); 3045 print_helper(st, "not_entrant", stats->_made_not_entrant, false); 3046 st->cr(); 3047 } 3048 3049 static void print_queue_info(outputStream* st, CompileQueue* queue) { 3050 if (queue != nullptr) { 3051 MutexLocker ml(queue->lock()); 3052 3053 uint total_cnt = 0; 3054 uint active_cnt = 0; 3055 for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3056 guarantee(jt != nullptr, ""); 3057 if (jt->is_Compiler_thread()) { 3058 CompilerThread* ct = (CompilerThread*)jt; 3059 3060 guarantee(ct != nullptr, ""); 3061 if (ct->queue() == queue) { 3062 ++total_cnt; 3063 CompileTask* task = ct->task(); 3064 if (task != nullptr) { 3065 ++active_cnt; 3066 } 3067 } 3068 } 3069 } 3070 3071 st->print(" %s (%d active / %d total threads): %u tasks", 3072 queue->name(), active_cnt, total_cnt, queue->size()); 3073 if (queue->size() > 0) { 3074 uint counts[] = {0, 0, 0, 0, 0}; // T1 ... 
T5 3075 for (CompileTask* task = queue->first(); task != nullptr; task = task->next()) { 3076 int tier = task->comp_level(); 3077 if (task->is_scc() && task->preload()) { 3078 assert(tier == CompLevel_full_optimization, "%d", tier); 3079 tier = CompLevel_full_optimization + 1; 3080 } 3081 counts[tier-1]++; 3082 } 3083 st->print(":"); 3084 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3085 uint cnt = counts[tier-1]; 3086 if (cnt > 0) { 3087 st->print(" T%d: %u tasks;", tier, cnt); 3088 } 3089 } 3090 } 3091 st->cr(); 3092 3093 // for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3094 // guarantee(jt != nullptr, ""); 3095 // if (jt->is_Compiler_thread()) { 3096 // CompilerThread* ct = (CompilerThread*)jt; 3097 // 3098 // guarantee(ct != nullptr, ""); 3099 // if (ct->queue() == queue) { 3100 // ResourceMark rm; 3101 // CompileTask* task = ct->task(); 3102 // st->print(" %s: ", ct->name_raw()); 3103 // if (task != nullptr) { 3104 // task->print(st, nullptr, true /*short_form*/, false /*cr*/); 3105 // } 3106 // st->cr(); 3107 // } 3108 // } 3109 // } 3110 } 3111 } 3112 void CompileBroker::print_statistics_on(outputStream* st) { 3113 st->print_cr(" Total: %u methods; %u bailouts, %u invalidated, %u non_entrant", 3114 _total_compile_count, _total_bailout_count, _total_invalidated_count, _total_not_entrant_count); 3115 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 3116 print_tier_helper(st, "Tier", tier, &_stats_per_level[tier-1]); 3117 } 3118 st->cr(); 3119 3120 if (LoadCachedCode || StoreCachedCode) { 3121 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3122 if (tier != CompLevel_full_profile) { 3123 print_tier_helper(st, "SC T", tier, &_scc_stats_per_level[tier - 1]); 3124 } 3125 } 3126 st->cr(); 3127 } 3128 3129 print_queue_info(st, _c1_compile_queue); 3130 print_queue_info(st, _c2_compile_queue); 3131 print_queue_info(st, _c3_compile_queue); 3132 print_queue_info(st, _sc1_compile_queue); 3133 print_queue_info(st, _sc2_compile_queue); 3134 } 3135 3136 void CompileBroker::print_times(bool per_compiler, bool aggregate) { 3137 if (per_compiler) { 3138 if (aggregate) { 3139 tty->cr(); 3140 tty->print_cr("[%dms] Individual compiler times (for compiled methods only)", (int)tty->time_stamp().milliseconds()); 3141 tty->print_cr("------------------------------------------------"); 3142 tty->cr(); 3143 } 3144 for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { 3145 AbstractCompiler* comp = _compilers[i]; 3146 if (comp != nullptr) { 3147 print_times(comp->name(), comp->stats()); 3148 } 3149 } 3150 if (_scc_stats._standard._count > 0) { 3151 print_times("SC", &_scc_stats); 3152 } 3153 if (aggregate) { 3154 tty->cr(); 3155 tty->print_cr("Individual compilation Tier times (for compiled methods only)"); 3156 tty->print_cr("------------------------------------------------"); 3157 tty->cr(); 3158 } 3159 char tier_name[256]; 3160 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 3161 CompilerStatistics* stats = &_stats_per_level[tier-1]; 3162 os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier); 3163 print_times(tier_name, stats); 3164 } 3165 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3166 CompilerStatistics* stats = &_scc_stats_per_level[tier-1]; 3167 if (stats->_standard._bytes > 0) { 3168 
      if (stats->_standard._bytes > 0) {
        os::snprintf_checked(tier_name, sizeof(tier_name), "SC T%d", tier);
        print_times(tier_name, stats);
      }
    }
  }

  if (!aggregate) {
    return;
  }

  elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation;
  elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation;
  elapsedTimer total_compilation = CompileBroker::_t_total_compilation;

  uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled;
  uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled;

  uint standard_compile_count = CompileBroker::_total_standard_compile_count;
  uint osr_compile_count = CompileBroker::_total_osr_compile_count;
  uint total_compile_count = CompileBroker::_total_compile_count;
  uint total_bailout_count = CompileBroker::_total_bailout_count;
  uint total_invalidated_count = CompileBroker::_total_invalidated_count;

  uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size;
  uint nmethods_size = CompileBroker::_sum_nmethod_size;

  tty->cr();
  tty->print_cr("Accumulated compiler times");
  tty->print_cr("----------------------------------------------------------");
  //0000000000111111111122222222223333333333444444444455555555556666666666
  //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr("  Total compilation time   : %7.3f s", total_compilation.seconds());
  tty->print_cr("    Standard compilation   : %7.3f s, Average : %2.3f s",
                standard_compilation.seconds(),
                standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count);
  tty->print_cr("    Bailed out compilation : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_bailedout_compilation.seconds(),
                total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count);
  tty->print_cr("    On stack replacement   : %7.3f s, Average : %2.3f s",
                osr_compilation.seconds(),
                osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count);
  tty->print_cr("    Invalidated            : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_invalidated_compilation.seconds(),
                total_invalidated_count == 0 ? 0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count);

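  // Detailed per-component timers: the SC cache (if enabled) and each installed compiler, including JVMCI.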
  if (StoreCachedCode || LoadCachedCode) { // Check flags because SC cache could be closed already
    tty->cr();
    SCCache::print_timers_on(tty);
  }
  AbstractCompiler *comp = compiler(CompLevel_simple);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = compiler(CompLevel_full_optimization);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = _compilers[2];
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCICompiler *jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null());
    if (jvmci_comp != nullptr && jvmci_comp != comp) {
      tty->cr();
      jvmci_comp->print_timers();
    }
  }
#endif

  tty->cr();
  tty->print_cr("  Total compiled methods    : %8u methods", total_compile_count);
  tty->print_cr("    Standard compilation    : %8u methods", standard_compile_count);
  tty->print_cr("    On stack replacement    : %8u methods", osr_compile_count);
  uint tcb = osr_bytes_compiled + standard_bytes_compiled;
  tty->print_cr("  Total compiled bytecodes  : %8u bytes", tcb);
  tty->print_cr("    Standard compilation    : %8u bytes", standard_bytes_compiled);
  tty->print_cr("    On stack replacement    : %8u bytes", osr_bytes_compiled);
  double tcs = total_compilation.seconds();
  uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs);
  tty->print_cr("  Average compilation speed : %8u bytes/s", bps);
  tty->cr();
  tty->print_cr("  nmethod code size         : %8u bytes", nmethods_code_size);
  tty->print_cr("  nmethod total size        : %8u bytes", nmethods_size);
}

// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
  if (out == nullptr) out = tty;
  out->cr();
  out->print_cr("======================");
  out->print_cr("   General JIT info   ");
  out->print_cr("======================");
  out->cr();
  out->print_cr("            JIT is : %7s", should_compile_new_jobs() ? "on" : "off");
  out->print_cr("  Compiler threads : %7d", (int)CICompilerCount);
  out->cr();
  out->print_cr("CodeCache overview");
  out->print_cr("--------------------------------------------------------");
  out->cr();
  out->print_cr("         Reserved size : %7zu KB", CodeCache::max_capacity() / K);
  out->print_cr("        Committed size : %7zu KB", CodeCache::capacity() / K);
  out->print_cr("  Unallocated capacity : %7zu KB", CodeCache::unallocated_capacity() / K);
  out->cr();
}

// Note: tty_lock must not be held upon entry to this function.
//       Print functions called from herein do "micro-locking" on tty_lock.
//       That's a tradeoff which keeps together important blocks of output.
//       At the same time, continuous tty_lock hold time is kept in check,
//       preventing concurrently printing threads from stalling a long time.
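//
// The 'function' argument selects the analysis step(s): "all" runs everything, "aggregate"
// (or "analyze") rebuilds the aggregated CodeHeap state, and "UsedSpace", "FreeSpace",
// "MethodCount", "MethodSpace", "MethodAge", "MethodNames", and "discard" emit or drop
// individual views. 'granularity' is passed through to CodeCache::aggregate().
// Typically reached via the Compiler.CodeHeap_Analytics diagnostic command (jcmd).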
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
  TimeStamp ts_total;
  TimeStamp ts_global;
  TimeStamp ts;

  bool allFun = !strcmp(function, "all");
  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
  bool methodCount = !strcmp(function, "MethodCount") || allFun;
  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
  bool methodAge = !strcmp(function, "MethodAge") || allFun;
  bool methodNames = !strcmp(function, "MethodNames") || allFun;
  bool discard = !strcmp(function, "discard") || allFun;

  if (out == nullptr) {
    out = tty;
  }

  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
    out->cr();
    return;
  }

  ts_total.update(); // record starting point

  if (aggregate) {
    print_info(out);
  }

  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
  // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
  // When we request individual parts of the analysis via the jcmd interface, it is possible
  // that in between another thread (another jcmd user or the vm running into CodeCache OOM)
  // updated the aggregated data. We will then see a modified, but again consistent, view
  // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
  // a lock across user interaction.

  // We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock.
  // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
  // leading to an unnecessarily long hold time of the other locks we acquired before.
  ts.update(); // record starting point
  MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
  out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());

  // Holding the CodeCache_lock protects from concurrent alterations of the CodeCache.
  // Unfortunately, such protection is not sufficient:
  // When a new nmethod is created via ciEnv::register_method(), the
  // Compile_lock is taken first. After some initializations,
  // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
  // immediately (after finalizing the oop references). To lock out concurrent
  // modifiers, we have to grab both locks as well in the described sequence.
  //
  // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
  // for the entire duration of aggregation and printing. That makes sure we see
  // a consistent picture and do not run into issues caused by concurrent alterations.
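  // Decide which locks still have to be taken. At a safepoint, or when the current thread
  // already owns a lock, it must not be acquired again. For "all" requests the locks are
  // held globally across aggregation and printing; otherwise they are taken only around
  // the aggregation step itself.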
  bool should_take_Compile_lock   = !SafepointSynchronize::is_at_safepoint() &&
                                    !Compile_lock->owned_by_self();
  bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
                                    !CodeCache_lock->owned_by_self();
  bool take_global_lock_1   =  allFun && should_take_Compile_lock;
  bool take_global_lock_2   =  allFun && should_take_CodeCache_lock;
  bool take_function_lock_1 = !allFun && should_take_Compile_lock;
  bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
  bool take_global_locks   = take_global_lock_1 || take_global_lock_2;
  bool take_function_locks = take_function_lock_1 || take_function_lock_2;

  ts_global.update(); // record starting point

  ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
  ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
    ts_global.update(); // record starting point
  }

  if (aggregate) {
    ts.update(); // record starting point
    ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag);
    ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
    }

    ts.update(); // record starting point
    CodeCache::aggregate(out, granularity);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
    }
  }

  if (usedSpace) CodeCache::print_usedSpace(out);
  if (freeSpace) CodeCache::print_freeSpace(out);
  if (methodCount) CodeCache::print_count(out);
  if (methodSpace) CodeCache::print_space(out);
  if (methodAge) CodeCache::print_age(out);
  if (methodNames) {
    if (allFun) {
      // print_names() can only be used safely if the locks have been continuously held
      // since the aggregation step began. That is true only for function "all".
      CodeCache::print_names(out);
    } else {
      out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
    }
  }
  if (discard) CodeCache::discard(out);

  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
  }
  out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
}