1 /* 2 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "cds/cdsConfig.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/symbolTable.hpp" 28 #include "classfile/vmClasses.hpp" 29 #include "classfile/vmSymbols.hpp" 30 #include "code/codeCache.hpp" 31 #include "code/codeHeapState.hpp" 32 #include "code/dependencyContext.hpp" 33 #include "compiler/compilationLog.hpp" 34 #include "compiler/compilationMemoryStatistic.hpp" 35 #include "compiler/compilationPolicy.hpp" 36 #include "compiler/compileBroker.hpp" 37 #include "compiler/compileLog.hpp" 38 #include "compiler/compilerEvent.hpp" 39 #include "compiler/compilerOracle.hpp" 40 #include "compiler/directivesParser.hpp" 41 #include "gc/shared/memAllocator.hpp" 42 #include "interpreter/linkResolver.hpp" 43 #include "jfr/jfrEvents.hpp" 44 #include "jvm.h" 45 #include "logging/log.hpp" 46 #include "logging/logStream.hpp" 47 #include "memory/allocation.inline.hpp" 48 #include "memory/resourceArea.hpp" 49 #include "memory/universe.hpp" 50 #include "oops/method.inline.hpp" 51 #include "oops/methodData.hpp" 52 #include "oops/oop.inline.hpp" 53 #include "prims/jvmtiExport.hpp" 54 #include "prims/nativeLookup.hpp" 55 #include "prims/whitebox.hpp" 56 #include "runtime/atomic.hpp" 57 #include "runtime/escapeBarrier.hpp" 58 #include "runtime/globals_extension.hpp" 59 #include "runtime/handles.inline.hpp" 60 #include "runtime/init.hpp" 61 #include "runtime/interfaceSupport.inline.hpp" 62 #include "runtime/java.hpp" 63 #include "runtime/javaCalls.hpp" 64 #include "runtime/jniHandles.inline.hpp" 65 #include "runtime/os.hpp" 66 #include "runtime/perfData.hpp" 67 #include "runtime/safepointVerifiers.hpp" 68 #include "runtime/sharedRuntime.hpp" 69 #include "runtime/threads.hpp" 70 #include "runtime/threadSMR.hpp" 71 #include "runtime/timerTrace.hpp" 72 #include "runtime/vframe.inline.hpp" 73 #include "utilities/debug.hpp" 74 #include "utilities/dtrace.hpp" 75 #include "utilities/events.hpp" 76 #include "utilities/formatBuffer.hpp" 77 #include "utilities/macros.hpp" 78 #ifdef COMPILER1 79 #include "c1/c1_Compiler.hpp" 80 #endif 81 #ifdef COMPILER2 82 #include "opto/c2compiler.hpp" 83 #endif 84 #if INCLUDE_JVMCI 85 #include "jvmci/jvmciEnv.hpp" 86 #include "jvmci/jvmciRuntime.hpp" 87 #endif 88 89 #ifdef DTRACE_ENABLED 90 91 // Only bother with this argument setup if dtrace is available 92 93 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ 94 { \ 95 Symbol* klass_name = 
(method)->klass_name();                                                   \
    Symbol* name = (method)->name();                                        \
    Symbol* signature = (method)->signature();                              \
    HOTSPOT_METHOD_COMPILE_BEGIN(                                           \
      (char *) comp_name, strlen(comp_name),                                \
      (char *) klass_name->bytes(), klass_name->utf8_length(),              \
      (char *) name->bytes(), name->utf8_length(),                          \
      (char *) signature->bytes(), signature->utf8_length());               \
  }

#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)         \
  {                                                                         \
    Symbol* klass_name = (method)->klass_name();                            \
    Symbol* name = (method)->name();                                        \
    Symbol* signature = (method)->signature();                              \
    HOTSPOT_METHOD_COMPILE_END(                                             \
      (char *) comp_name, strlen(comp_name),                                \
      (char *) klass_name->bytes(), klass_name->utf8_length(),              \
      (char *) name->bytes(), name->utf8_length(),                          \
      (char *) signature->bytes(), signature->utf8_length(), (success));    \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)

#endif // ndef DTRACE_ENABLED

bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false;
volatile int  CompileBroker::_print_compilation_warning = 0;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;

// The installed compiler(s)
AbstractCompiler* CompileBroker::_compilers[2];

// The maximum number of compiler threads of each type, determined during startup.
int CompileBroker::_c1_count = 0;
int CompileBroker::_c2_count = 0;

// Arrays of global JNI handles to the java.lang.Thread objects of the compiler threads
jobject* CompileBroker::_compiler1_objects = nullptr;
jobject* CompileBroker::_compiler2_objects = nullptr;

CompileLog** CompileBroker::_compiler1_logs = nullptr;
CompileLog** CompileBroker::_compiler2_logs = nullptr;

// These counters are used to assign a unique ID to each compilation.
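// (Standard, OSR and native compilations each draw from their own counter; see
// assign_compile_id() further below for how the debug-only CIStart/CIStop
// ranges are applied to these IDs.)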
144 volatile jint CompileBroker::_compilation_id = 0; 145 volatile jint CompileBroker::_osr_compilation_id = 0; 146 volatile jint CompileBroker::_native_compilation_id = 0; 147 148 // Performance counters 149 PerfCounter* CompileBroker::_perf_total_compilation = nullptr; 150 PerfCounter* CompileBroker::_perf_osr_compilation = nullptr; 151 PerfCounter* CompileBroker::_perf_standard_compilation = nullptr; 152 153 PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr; 154 PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr; 155 PerfCounter* CompileBroker::_perf_total_compile_count = nullptr; 156 PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr; 157 PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr; 158 159 PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr; 160 PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr; 161 PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr; 162 PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr; 163 164 PerfStringVariable* CompileBroker::_perf_last_method = nullptr; 165 PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr; 166 PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr; 167 PerfVariable* CompileBroker::_perf_last_compile_type = nullptr; 168 PerfVariable* CompileBroker::_perf_last_compile_size = nullptr; 169 PerfVariable* CompileBroker::_perf_last_failed_type = nullptr; 170 PerfVariable* CompileBroker::_perf_last_invalidated_type = nullptr; 171 172 // Timers and counters for generating statistics 173 elapsedTimer CompileBroker::_t_total_compilation; 174 elapsedTimer CompileBroker::_t_osr_compilation; 175 elapsedTimer CompileBroker::_t_standard_compilation; 176 elapsedTimer CompileBroker::_t_invalidated_compilation; 177 elapsedTimer CompileBroker::_t_bailedout_compilation; 178 179 uint CompileBroker::_total_bailout_count = 0; 180 uint CompileBroker::_total_invalidated_count = 0; 181 uint CompileBroker::_total_compile_count = 0; 182 uint CompileBroker::_total_osr_compile_count = 0; 183 uint CompileBroker::_total_standard_compile_count = 0; 184 uint CompileBroker::_total_compiler_stopped_count = 0; 185 uint CompileBroker::_total_compiler_restarted_count = 0; 186 187 uint CompileBroker::_sum_osr_bytes_compiled = 0; 188 uint CompileBroker::_sum_standard_bytes_compiled = 0; 189 uint CompileBroker::_sum_nmethod_size = 0; 190 uint CompileBroker::_sum_nmethod_code_size = 0; 191 192 jlong CompileBroker::_peak_compilation_time = 0; 193 194 CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization]; 195 196 CompileQueue* CompileBroker::_c2_compile_queue = nullptr; 197 CompileQueue* CompileBroker::_c1_compile_queue = nullptr; 198 199 bool compileBroker_init() { 200 if (LogEvents) { 201 CompilationLog::init(); 202 } 203 204 // init directives stack, adding default directive 205 DirectivesStack::init(); 206 207 if (DirectivesParser::has_file()) { 208 return DirectivesParser::parse_from_flag(); 209 } else if (CompilerDirectivesPrint) { 210 // Print default directive even when no other was added 211 DirectivesStack::print(tty); 212 } 213 214 return true; 215 } 216 217 CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) { 218 CompilerThread* thread = CompilerThread::current(); 219 thread->set_task(task); 220 CompileLog* log = thread->log(); 221 if (log != nullptr && !task->is_unloaded()) task->log_task_start(log); 222 } 223 224 CompileTaskWrapper::~CompileTaskWrapper() { 225 
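  // At this point the compile has either finished or been abandoned: log
  // completion, detach the task from this CompilerThread, and then either hand
  // the task back to the waiting thread (blocking compile) or free it here
  // (non-blocking compile).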
CompilerThread* thread = CompilerThread::current(); 226 CompileTask* task = thread->task(); 227 CompileLog* log = thread->log(); 228 if (log != nullptr && !task->is_unloaded()) task->log_task_done(log); 229 thread->set_task(nullptr); 230 thread->set_env(nullptr); 231 if (task->is_blocking()) { 232 bool free_task = false; 233 { 234 MutexLocker notifier(thread, task->lock()); 235 task->mark_complete(); 236 #if INCLUDE_JVMCI 237 if (CompileBroker::compiler(task->comp_level())->is_jvmci()) { 238 if (!task->has_waiter()) { 239 // The waiting thread timed out and thus did not free the task. 240 free_task = true; 241 } 242 task->set_blocking_jvmci_compile_state(nullptr); 243 } 244 #endif 245 if (!free_task) { 246 // Notify the waiting thread that the compilation has completed 247 // so that it can free the task. 248 task->lock()->notify_all(); 249 } 250 } 251 if (free_task) { 252 // The task can only be freed once the task lock is released. 253 CompileTask::free(task); 254 } 255 } else { 256 task->mark_complete(); 257 258 // By convention, the compiling thread is responsible for 259 // recycling a non-blocking CompileTask. 260 CompileTask::free(task); 261 } 262 } 263 264 /** 265 * Check if a CompilerThread can be removed and update count if requested. 266 */ 267 bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) { 268 assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here"); 269 if (!ReduceNumberOfCompilerThreads) return false; 270 271 AbstractCompiler *compiler = ct->compiler(); 272 int compiler_count = compiler->num_compiler_threads(); 273 bool c1 = compiler->is_c1(); 274 275 // Keep at least 1 compiler thread of each type. 276 if (compiler_count < 2) return false; 277 278 // Keep thread alive for at least some time. 279 if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false; 280 281 #if INCLUDE_JVMCI 282 if (compiler->is_jvmci() && !UseJVMCINativeLibrary) { 283 // Handles for JVMCI thread objects may get released concurrently. 284 if (do_it) { 285 assert(CompileThread_lock->owner() == ct, "must be holding lock"); 286 } else { 287 // Skip check if it's the last thread and let caller check again. 288 return true; 289 } 290 } 291 #endif 292 293 // We only allow the last compiler thread of each type to get removed. 294 jobject last_compiler = c1 ? compiler1_object(compiler_count - 1) 295 : compiler2_object(compiler_count - 1); 296 if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) { 297 if (do_it) { 298 assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent. 299 compiler->set_num_compiler_threads(compiler_count - 1); 300 #if INCLUDE_JVMCI 301 if (compiler->is_jvmci() && !UseJVMCINativeLibrary) { 302 // Old j.l.Thread object can die when no longer referenced elsewhere. 303 JNIHandles::destroy_global(compiler2_object(compiler_count - 1)); 304 _compiler2_objects[compiler_count - 1] = nullptr; 305 } 306 #endif 307 } 308 return true; 309 } 310 return false; 311 } 312 313 /** 314 * Add a CompileTask to a CompileQueue. 315 */ 316 void CompileQueue::add(CompileTask* task) { 317 assert(MethodCompileQueue_lock->owned_by_self(), "must own lock"); 318 319 task->set_next(nullptr); 320 task->set_prev(nullptr); 321 322 if (_last == nullptr) { 323 // The compile queue is empty. 324 assert(_first == nullptr, "queue is empty"); 325 _first = task; 326 _last = task; 327 } else { 328 // Append the task to the queue. 
    assert(_last->next() == nullptr, "not last");
    _last->set_next(task);
    task->set_prev(_last);
    _last = task;
  }
  ++_size;
  ++_total_added;
  if (_size > _peak_size) {
    _peak_size = _size;
  }

  // Mark the method as being in the compile queue.
  task->method()->set_queued_for_compilation();

  if (CIPrintCompileQueue) {
    print_tty();
  }

  if (LogCompilation && xtty != nullptr) {
    task->log_task_queued();
  }

  if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) {
    CompileTrainingData* ctd = CompileTrainingData::make(task);
    if (ctd != nullptr) {
      task->set_training_data(ctd);
    }
  }

  // Notify CompilerThreads that a task is available.
  MethodCompileQueue_lock->notify_all();
}

/**
 * Empties the compilation queue by putting all compilation tasks onto
 * a freelist. Furthermore, the method wakes up all threads that are
 * waiting on a compilation task to finish. This can happen if background
 * compilation is disabled.
 */
void CompileQueue::free_all() {
  MutexLocker mu(MethodCompileQueue_lock);
  CompileTask* next = _first;

  // Iterate over all tasks in the compile queue
  while (next != nullptr) {
    CompileTask* current = next;
    next = current->next();
    bool found_waiter = false;
    {
      MutexLocker ct_lock(current->lock());
      assert(current->waiting_for_completion_count() <= 1, "more than one thread is waiting for this task");
      if (current->waiting_for_completion_count() > 0) {
        // If another thread waits for this task, we must wake it up
        // so it will stop waiting and free the task.
        current->lock()->notify();
        found_waiter = true;
      }
    }
    if (!found_waiter) {
      // If no one was waiting for this task, we need to free it ourselves. In this case,
      // the task is also certainly unlocked, because, again, there is no waiter.
      // Otherwise, by convention, it is the waiter's responsibility to free the task.
      // Put the task back on the freelist.
      CompileTask::free(current);
    }
  }
  _first = nullptr;
  _last = nullptr;

  // Wake up all threads that block on the queue.
  MethodCompileQueue_lock->notify_all();
}

/**
 * Get the next CompileTask from a CompileQueue.
 */
CompileTask* CompileQueue::get(CompilerThread* thread) {
  // Keep the selected method alive across the safepoints that may occur while
  // MethodCompileQueue_lock is released below, so that RedefineClasses cannot
  // reclaim it.
  methodHandle save_method;

  MonitorLocker locker(MethodCompileQueue_lock);
  // If _first is null we have no more compile jobs. There are two reasons for
  // having no compile jobs: First, we compiled everything we wanted. Second,
  // we ran out of code cache so compilation has been disabled. In the latter
  // case we perform code cache sweeps to free memory such that we can re-enable
  // compilation.
  while (_first == nullptr) {
    // Exit loop if compilation is disabled forever
    if (CompileBroker::is_compilation_disabled_forever()) {
      return nullptr;
    }

    AbstractCompiler* compiler = thread->compiler();
    guarantee(compiler != nullptr, "Compiler object must exist");
    compiler->on_empty_queue(this, thread);
    if (_first != nullptr) {
      // The call to on_empty_queue may have temporarily unlocked the MCQ lock
      // so check again whether any tasks were added to the queue.
428 break; 429 } 430 431 // If there are no compilation tasks and we can compile new jobs 432 // (i.e., there is enough free space in the code cache) there is 433 // no need to invoke the GC. 434 // We need a timed wait here, since compiler threads can exit if compilation 435 // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads 436 // is not critical and we do not want idle compiler threads to wake up too often. 437 locker.wait(5*1000); 438 439 if (UseDynamicNumberOfCompilerThreads && _first == nullptr) { 440 // Still nothing to compile. Give caller a chance to stop this thread. 441 if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr; 442 } 443 } 444 445 if (CompileBroker::is_compilation_disabled_forever()) { 446 return nullptr; 447 } 448 449 CompileTask* task; 450 { 451 NoSafepointVerifier nsv; 452 task = CompilationPolicy::select_task(this, thread); 453 if (task != nullptr) { 454 task = task->select_for_compilation(); 455 } 456 } 457 458 if (task != nullptr) { 459 // Save method pointers across unlock safepoint. The task is removed from 460 // the compilation queue, which is walked during RedefineClasses. 461 Thread* thread = Thread::current(); 462 save_method = methodHandle(thread, task->method()); 463 464 remove(task); 465 } 466 purge_stale_tasks(); // may temporarily release MCQ lock 467 return task; 468 } 469 470 // Clean & deallocate stale compile tasks. 471 // Temporarily releases MethodCompileQueue lock. 472 void CompileQueue::purge_stale_tasks() { 473 assert(MethodCompileQueue_lock->owned_by_self(), "must own lock"); 474 if (_first_stale != nullptr) { 475 // Stale tasks are purged when MCQ lock is released, 476 // but _first_stale updates are protected by MCQ lock. 477 // Once task processing starts and MCQ lock is released, 478 // other compiler threads can reuse _first_stale. 
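    // Detach the whole stale list while still holding the lock; the tasks
    // themselves are then freed below with MethodCompileQueue_lock released
    // (see remove_and_mark_stale() for why reclamation is kept outside the lock).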
    CompileTask* head = _first_stale;
    _first_stale = nullptr;
    {
      MutexUnlocker ul(MethodCompileQueue_lock);
      for (CompileTask* task = head; task != nullptr; ) {
        CompileTask* next_task = task->next();
        CompileTaskWrapper ctw(task); // Frees the task
        task->set_failure_reason("stale task");
        task = next_task;
      }
    }
  }
}

void CompileQueue::remove(CompileTask* task) {
  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
  if (task->prev() != nullptr) {
    task->prev()->set_next(task->next());
  } else {
    // task is the first element
    assert(task == _first, "Sanity");
    _first = task->next();
  }

  if (task->next() != nullptr) {
    task->next()->set_prev(task->prev());
  } else {
    // task is the last element
    assert(task == _last, "Sanity");
    _last = task->prev();
  }
  --_size;
  ++_total_removed;
}

void CompileQueue::remove_and_mark_stale(CompileTask* task) {
  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
  remove(task);

  // Enqueue the task for reclamation (should be done outside MCQ lock)
  task->set_next(_first_stale);
  task->set_prev(nullptr);
  _first_stale = task;
}

// Methods in the compile queue need to be marked as used on the stack
// so that they don't get reclaimed by RedefineClasses.
void CompileQueue::mark_on_stack() {
  CompileTask* task = _first;
  while (task != nullptr) {
    task->mark_on_stack();
    task = task->next();
  }
}


CompileQueue* CompileBroker::compile_queue(int comp_level) {
  if (is_c2_compile(comp_level)) return _c2_compile_queue;
  if (is_c1_compile(comp_level)) return _c1_compile_queue;
  return nullptr;
}

CompileQueue* CompileBroker::c1_compile_queue() {
  return _c1_compile_queue;
}

CompileQueue* CompileBroker::c2_compile_queue() {
  return _c2_compile_queue;
}

void CompileBroker::print_compile_queues(outputStream* st) {
  st->print_cr("Current compiles: ");

  char buf[2000];
  int buflen = sizeof(buf);
  Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);

  st->cr();
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->print(st);
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->print(st);
  }
}

void CompileQueue::print(outputStream* st) {
  assert_locked_or_safepoint(MethodCompileQueue_lock);
  st->print_cr("%s:", name());
  CompileTask* task = _first;
  if (task == nullptr) {
    st->print_cr("Empty");
  } else {
    while (task != nullptr) {
      task->print(st, nullptr, true, true);
      task = task->next();
    }
  }
  st->cr();
}

void CompileQueue::print_tty() {
  stringStream ss;
  // Dump the compile queue into a buffer before locking the tty
  print(&ss);
  {
    ttyLocker ttyl;
    tty->print("%s", ss.freeze());
  }
}

CompilerCounters::CompilerCounters() {
  _current_method[0] = '\0';
  _compile_type = CompileBroker::no_compile;
}

#if INCLUDE_JFR && COMPILER2_OR_JVMCI
// Appends new compiler phase names to the growable array phase_names (a new
// CompilerPhaseType mapping in compiler/compilerEvent.cpp) and registers them
// with the JFR serializer.
//
// C2 uses an explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if C2 is used it must always be registered first.
// This function is called during VM initialization.
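// Note: because C2 phase ids come from the static idToPhase table while JVMCI
// phase names are only discovered at run time, C2 registration (if any) has to
// happen before the first JVMCI registration; the first_registration flag in
// the function below asserts exactly that ordering.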
602 static void register_jfr_phasetype_serializer(CompilerType compiler_type) { 603 ResourceMark rm; 604 static bool first_registration = true; 605 if (compiler_type == compiler_jvmci) { 606 CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false); 607 first_registration = false; 608 #ifdef COMPILER2 609 } else if (compiler_type == compiler_c2) { 610 assert(first_registration, "invariant"); // c2 must be registered first. 611 for (int i = 0; i < PHASE_NUM_TYPES; i++) { 612 const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i); 613 CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false); 614 } 615 first_registration = false; 616 #endif // COMPILER2 617 } 618 } 619 #endif // INCLUDE_JFR && COMPILER2_OR_JVMCI 620 621 // ------------------------------------------------------------------ 622 // CompileBroker::compilation_init 623 // 624 // Initialize the Compilation object 625 void CompileBroker::compilation_init(JavaThread* THREAD) { 626 // No need to initialize compilation system if we do not use it. 627 if (!UseCompiler) { 628 return; 629 } 630 // Set the interface to the current compiler(s). 631 _c1_count = CompilationPolicy::c1_count(); 632 _c2_count = CompilationPolicy::c2_count(); 633 634 #if INCLUDE_JVMCI 635 if (EnableJVMCI) { 636 // This is creating a JVMCICompiler singleton. 637 JVMCICompiler* jvmci = new JVMCICompiler(); 638 639 if (UseJVMCICompiler) { 640 _compilers[1] = jvmci; 641 if (FLAG_IS_DEFAULT(JVMCIThreads)) { 642 if (BootstrapJVMCI) { 643 // JVMCI will bootstrap so give it more threads 644 _c2_count = MIN2(32, os::active_processor_count()); 645 } 646 } else { 647 _c2_count = JVMCIThreads; 648 } 649 if (FLAG_IS_DEFAULT(JVMCIHostThreads)) { 650 } else { 651 #ifdef COMPILER1 652 _c1_count = JVMCIHostThreads; 653 #endif // COMPILER1 654 } 655 } 656 } 657 #endif // INCLUDE_JVMCI 658 659 #ifdef COMPILER1 660 if (_c1_count > 0) { 661 _compilers[0] = new Compiler(); 662 } 663 #endif // COMPILER1 664 665 #ifdef COMPILER2 666 if (true JVMCI_ONLY( && !UseJVMCICompiler)) { 667 if (_c2_count > 0) { 668 _compilers[1] = new C2Compiler(); 669 // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit. 670 // idToPhase mapping for c2 is in opto/phasetype.hpp 671 JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);) 672 } 673 } 674 #endif // COMPILER2 675 676 #if INCLUDE_JVMCI 677 // Register after c2 registration. 678 // JVMCI CompilerPhaseType idToPhase mapping is dynamic. 679 if (EnableJVMCI) { 680 JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);) 681 } 682 #endif // INCLUDE_JVMCI 683 684 if (CompilerOracle::should_collect_memstat()) { 685 CompilationMemoryStatistic::initialize(); 686 } 687 688 // Start the compiler thread(s) 689 init_compiler_threads(); 690 // totalTime performance counter is always created as it is required 691 // by the implementation of java.lang.management.CompilationMXBean. 692 { 693 // Ensure OOM leads to vm_exit_during_initialization. 
694 EXCEPTION_MARK; 695 _perf_total_compilation = 696 PerfDataManager::create_counter(JAVA_CI, "totalTime", 697 PerfData::U_Ticks, CHECK); 698 } 699 700 if (UsePerfData) { 701 702 EXCEPTION_MARK; 703 704 // create the jvmstat performance counters 705 _perf_osr_compilation = 706 PerfDataManager::create_counter(SUN_CI, "osrTime", 707 PerfData::U_Ticks, CHECK); 708 709 _perf_standard_compilation = 710 PerfDataManager::create_counter(SUN_CI, "standardTime", 711 PerfData::U_Ticks, CHECK); 712 713 _perf_total_bailout_count = 714 PerfDataManager::create_counter(SUN_CI, "totalBailouts", 715 PerfData::U_Events, CHECK); 716 717 _perf_total_invalidated_count = 718 PerfDataManager::create_counter(SUN_CI, "totalInvalidates", 719 PerfData::U_Events, CHECK); 720 721 _perf_total_compile_count = 722 PerfDataManager::create_counter(SUN_CI, "totalCompiles", 723 PerfData::U_Events, CHECK); 724 _perf_total_osr_compile_count = 725 PerfDataManager::create_counter(SUN_CI, "osrCompiles", 726 PerfData::U_Events, CHECK); 727 728 _perf_total_standard_compile_count = 729 PerfDataManager::create_counter(SUN_CI, "standardCompiles", 730 PerfData::U_Events, CHECK); 731 732 _perf_sum_osr_bytes_compiled = 733 PerfDataManager::create_counter(SUN_CI, "osrBytes", 734 PerfData::U_Bytes, CHECK); 735 736 _perf_sum_standard_bytes_compiled = 737 PerfDataManager::create_counter(SUN_CI, "standardBytes", 738 PerfData::U_Bytes, CHECK); 739 740 _perf_sum_nmethod_size = 741 PerfDataManager::create_counter(SUN_CI, "nmethodSize", 742 PerfData::U_Bytes, CHECK); 743 744 _perf_sum_nmethod_code_size = 745 PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize", 746 PerfData::U_Bytes, CHECK); 747 748 _perf_last_method = 749 PerfDataManager::create_string_variable(SUN_CI, "lastMethod", 750 CompilerCounters::cmname_buffer_length, 751 "", CHECK); 752 753 _perf_last_failed_method = 754 PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod", 755 CompilerCounters::cmname_buffer_length, 756 "", CHECK); 757 758 _perf_last_invalidated_method = 759 PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod", 760 CompilerCounters::cmname_buffer_length, 761 "", CHECK); 762 763 _perf_last_compile_type = 764 PerfDataManager::create_variable(SUN_CI, "lastType", 765 PerfData::U_None, 766 (jlong)CompileBroker::no_compile, 767 CHECK); 768 769 _perf_last_compile_size = 770 PerfDataManager::create_variable(SUN_CI, "lastSize", 771 PerfData::U_Bytes, 772 (jlong)CompileBroker::no_compile, 773 CHECK); 774 775 776 _perf_last_failed_type = 777 PerfDataManager::create_variable(SUN_CI, "lastFailedType", 778 PerfData::U_None, 779 (jlong)CompileBroker::no_compile, 780 CHECK); 781 782 _perf_last_invalidated_type = 783 PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType", 784 PerfData::U_None, 785 (jlong)CompileBroker::no_compile, 786 CHECK); 787 } 788 789 _initialized = true; 790 } 791 792 void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) { 793 CompilationPolicy::replay_training_at_init_loop(thread); 794 } 795 796 #if defined(ASSERT) && COMPILER2_OR_JVMCI 797 // Entry for DeoptimizeObjectsALotThread. 
The threads are started in 798 // CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled 799 void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) { 800 DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread); 801 bool enter_single_loop; 802 { 803 MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag); 804 static int single_thread_count = 0; 805 enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle; 806 } 807 if (enter_single_loop) { 808 dt->deoptimize_objects_alot_loop_single(); 809 } else { 810 dt->deoptimize_objects_alot_loop_all(); 811 } 812 } 813 814 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each 815 // barrier targets a single thread which is selected round robin. 816 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() { 817 HandleMark hm(this); 818 while (true) { 819 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) { 820 { // Begin new scope for escape barrier 821 HandleMarkCleaner hmc(this); 822 ResourceMark rm(this); 823 EscapeBarrier eb(true, this, deoptee_thread); 824 eb.deoptimize_objects(100); 825 } 826 // Now sleep after the escape barriers destructor resumed deoptee_thread. 827 sleep(DeoptimizeObjectsALotInterval); 828 } 829 } 830 } 831 832 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each 833 // barrier targets all java threads in the vm at once. 834 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() { 835 HandleMark hm(this); 836 while (true) { 837 { // Begin new scope for escape barrier 838 HandleMarkCleaner hmc(this); 839 ResourceMark rm(this); 840 EscapeBarrier eb(true, this); 841 eb.deoptimize_objects_all_threads(); 842 } 843 // Now sleep after the escape barriers destructor resumed the java threads. 844 sleep(DeoptimizeObjectsALotInterval); 845 } 846 } 847 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI 848 849 850 JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) { 851 Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle)); 852 853 if (java_lang_Thread::thread(thread_oop()) != nullptr) { 854 assert(type == compiler_t, "should only happen with reused compiler threads"); 855 // The compiler thread hasn't actually exited yet so don't try to reuse it 856 return nullptr; 857 } 858 859 JavaThread* new_thread = nullptr; 860 switch (type) { 861 case compiler_t: 862 assert(comp != nullptr, "Compiler instance missing."); 863 if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) { 864 CompilerCounters* counters = new CompilerCounters(); 865 new_thread = new CompilerThread(queue, counters); 866 } 867 break; 868 #if defined(ASSERT) && COMPILER2_OR_JVMCI 869 case deoptimizer_t: 870 new_thread = new DeoptimizeObjectsALotThread(); 871 break; 872 #endif // ASSERT 873 case training_replay_t: 874 new_thread = new TrainingReplayThread(); 875 break; 876 default: 877 ShouldNotReachHere(); 878 } 879 880 // At this point the new CompilerThread data-races with this startup 881 // thread (which is the main thread and NOT the VM thread). 882 // This means Java bytecodes being executed at startup can 883 // queue compile jobs which will run at whatever default priority the 884 // newly created CompilerThread runs at. 
885 886 887 // At this point it may be possible that no osthread was created for the 888 // JavaThread due to lack of resources. We will handle that failure below. 889 // Also check new_thread so that static analysis is happy. 890 if (new_thread != nullptr && new_thread->osthread() != nullptr) { 891 892 if (type == compiler_t) { 893 CompilerThread::cast(new_thread)->set_compiler(comp); 894 } 895 896 // Note that we cannot call os::set_priority because it expects Java 897 // priorities and we are *explicitly* using OS priorities so that it's 898 // possible to set the compiler thread priority higher than any Java 899 // thread. 900 901 int native_prio = CompilerThreadPriority; 902 if (native_prio == -1) { 903 if (UseCriticalCompilerThreadPriority) { 904 native_prio = os::java_to_os_priority[CriticalPriority]; 905 } else { 906 native_prio = os::java_to_os_priority[NearMaxPriority]; 907 } 908 } 909 os::set_native_priority(new_thread, native_prio); 910 911 // Note that this only sets the JavaThread _priority field, which by 912 // definition is limited to Java priorities and not OS priorities. 913 JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority); 914 915 } else { // osthread initialization failure 916 if (UseDynamicNumberOfCompilerThreads && type == compiler_t 917 && comp->num_compiler_threads() > 0) { 918 // The new thread is not known to Thread-SMR yet so we can just delete. 919 delete new_thread; 920 return nullptr; 921 } else { 922 vm_exit_during_initialization("java.lang.OutOfMemoryError", 923 os::native_thread_creation_failed_msg()); 924 } 925 } 926 927 os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS) 928 929 return new_thread; 930 } 931 932 static bool trace_compiler_threads() { 933 LogTarget(Debug, jit, thread) lt; 934 return TraceCompilerThreads || lt.is_enabled(); 935 } 936 937 static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) { 938 char name_buffer[256]; 939 os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i); 940 Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL); 941 return JNIHandles::make_global(thread_oop); 942 } 943 944 static void print_compiler_threads(stringStream& msg) { 945 if (TraceCompilerThreads) { 946 tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string()); 947 } 948 LogTarget(Debug, jit, thread) lt; 949 if (lt.is_enabled()) { 950 LogStream ls(lt); 951 ls.print_cr("%s", msg.as_string()); 952 } 953 } 954 955 void CompileBroker::init_compiler_threads() { 956 // Ensure any exceptions lead to vm_exit_during_initialization. 957 EXCEPTION_MARK; 958 #if !defined(ZERO) 959 assert(_c2_count > 0 || _c1_count > 0, "No compilers?"); 960 #endif // !ZERO 961 // Initialize the compilation queue 962 if (_c2_count > 0) { 963 const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue"; 964 _c2_compile_queue = new CompileQueue(name); 965 _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler); 966 _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler); 967 } 968 if (_c1_count > 0) { 969 _c1_compile_queue = new CompileQueue("C1 compile queue"); 970 _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler); 971 _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler); 972 } 973 974 for (int i = 0; i < _c2_count; i++) { 975 // Create a name for our thread. 
976 jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK); 977 _compiler2_objects[i] = thread_handle; 978 _compiler2_logs[i] = nullptr; 979 980 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 981 JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD); 982 assert(ct != nullptr, "should have been handled for initial thread"); 983 _compilers[1]->set_num_compiler_threads(i + 1); 984 if (trace_compiler_threads()) { 985 ResourceMark rm; 986 ThreadsListHandle tlh; // name() depends on the TLH. 987 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); 988 stringStream msg; 989 msg.print("Added initial compiler thread %s", ct->name()); 990 print_compiler_threads(msg); 991 } 992 } 993 } 994 995 for (int i = 0; i < _c1_count; i++) { 996 // Create a name for our thread. 997 jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK); 998 _compiler1_objects[i] = thread_handle; 999 _compiler1_logs[i] = nullptr; 1000 1001 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1002 JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD); 1003 assert(ct != nullptr, "should have been handled for initial thread"); 1004 _compilers[0]->set_num_compiler_threads(i + 1); 1005 if (trace_compiler_threads()) { 1006 ResourceMark rm; 1007 ThreadsListHandle tlh; // name() depends on the TLH. 1008 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); 1009 stringStream msg; 1010 msg.print("Added initial compiler thread %s", ct->name()); 1011 print_compiler_threads(msg); 1012 } 1013 } 1014 } 1015 1016 if (UsePerfData) { 1017 PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK); 1018 } 1019 1020 #if defined(ASSERT) && COMPILER2_OR_JVMCI 1021 if (DeoptimizeObjectsALot) { 1022 // Initialize and start the object deoptimizer threads 1023 const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll; 1024 for (int count = 0; count < total_count; count++) { 1025 Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK); 1026 jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop()); 1027 make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD); 1028 } 1029 } 1030 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI 1031 } 1032 1033 void CompileBroker::init_training_replay() { 1034 // Ensure any exceptions lead to vm_exit_during_initialization. 1035 EXCEPTION_MARK; 1036 if (TrainingData::have_data()) { 1037 Handle thread_oop = JavaThread::create_system_thread_object("Training replay thread", CHECK); 1038 jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop()); 1039 make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD); 1040 } 1041 } 1042 1043 void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { 1044 1045 int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0; 1046 const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4; 1047 1048 // Quick check if we already have enough compiler threads without taking the lock. 1049 // Numbers may change concurrently, so we read them again after we have the lock. 
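  // The sizing further below is roughly one C2 thread per c2_tasks_per_thread
  // queued tasks, additionally capped by free memory and free code cache.
  // A purely hypothetical example: with _c2_count == 8, 10 tasks queued, 2 GB
  // of free memory and 16 MB of non-profiled code cache available, this gives
  //   MIN4(8, 10 / 2, 2048M / 200M, 16M / 128K) = MIN4(8, 5, 10, 128) = 5
  // i.e. at most five C2 threads after this call.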
1050 if (_c2_compile_queue != nullptr) { 1051 old_c2_count = get_c2_thread_count(); 1052 new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread); 1053 } 1054 if (_c1_compile_queue != nullptr) { 1055 old_c1_count = get_c1_thread_count(); 1056 new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread); 1057 } 1058 if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return; 1059 1060 // Now, we do the more expensive operations. 1061 julong free_memory = os::free_memory(); 1062 // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All). 1063 size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled), 1064 available_cc_p = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled); 1065 1066 // Only attempt to start additional threads if the lock is free. 1067 if (!CompileThread_lock->try_lock()) return; 1068 1069 if (_c2_compile_queue != nullptr) { 1070 old_c2_count = get_c2_thread_count(); 1071 new_c2_count = MIN4(_c2_count, 1072 _c2_compile_queue->size() / c2_tasks_per_thread, 1073 (int)(free_memory / (200*M)), 1074 (int)(available_cc_np / (128*K))); 1075 1076 for (int i = old_c2_count; i < new_c2_count; i++) { 1077 #if INCLUDE_JVMCI 1078 if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) { 1079 // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their 1080 // existence is completely hidden from the rest of the VM (and those compiler threads can't 1081 // call Java code to do the creation anyway). 1082 // 1083 // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we 1084 // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads. For 1085 // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary 1086 // coupling with Java. 1087 if (!THREAD->can_call_java()) break; 1088 char name_buffer[256]; 1089 os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i); 1090 Handle thread_oop; 1091 { 1092 // We have to give up the lock temporarily for the Java calls. 1093 MutexUnlocker mu(CompileThread_lock); 1094 thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD); 1095 } 1096 if (HAS_PENDING_EXCEPTION) { 1097 if (trace_compiler_threads()) { 1098 ResourceMark rm; 1099 stringStream msg; 1100 msg.print_cr("JVMCI compiler thread creation failed:"); 1101 PENDING_EXCEPTION->print_on(&msg); 1102 print_compiler_threads(msg); 1103 } 1104 CLEAR_PENDING_EXCEPTION; 1105 break; 1106 } 1107 // Check if another thread has beaten us during the Java calls. 1108 if (get_c2_thread_count() != i) break; 1109 jobject thread_handle = JNIHandles::make_global(thread_oop); 1110 assert(compiler2_object(i) == nullptr, "Old one must be released!"); 1111 _compiler2_objects[i] = thread_handle; 1112 } 1113 #endif 1114 guarantee(compiler2_object(i) != nullptr, "Thread oop must exist"); 1115 JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD); 1116 if (ct == nullptr) break; 1117 _compilers[1]->set_num_compiler_threads(i + 1); 1118 if (trace_compiler_threads()) { 1119 ResourceMark rm; 1120 ThreadsListHandle tlh; // name() depends on the TLH. 
1121 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); 1122 stringStream msg; 1123 msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)", 1124 ct->name(), (int)(free_memory/M), (int)(available_cc_np/M)); 1125 print_compiler_threads(msg); 1126 } 1127 } 1128 } 1129 1130 if (_c1_compile_queue != nullptr) { 1131 old_c1_count = get_c1_thread_count(); 1132 new_c1_count = MIN4(_c1_count, 1133 _c1_compile_queue->size() / c1_tasks_per_thread, 1134 (int)(free_memory / (100*M)), 1135 (int)(available_cc_p / (128*K))); 1136 1137 for (int i = old_c1_count; i < new_c1_count; i++) { 1138 JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD); 1139 if (ct == nullptr) break; 1140 _compilers[0]->set_num_compiler_threads(i + 1); 1141 if (trace_compiler_threads()) { 1142 ResourceMark rm; 1143 ThreadsListHandle tlh; // name() depends on the TLH. 1144 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); 1145 stringStream msg; 1146 msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)", 1147 ct->name(), (int)(free_memory/M), (int)(available_cc_p/M)); 1148 print_compiler_threads(msg); 1149 } 1150 } 1151 } 1152 1153 CompileThread_lock->unlock(); 1154 } 1155 1156 1157 /** 1158 * Set the methods on the stack as on_stack so that redefine classes doesn't 1159 * reclaim them. This method is executed at a safepoint. 1160 */ 1161 void CompileBroker::mark_on_stack() { 1162 assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); 1163 // Since we are at a safepoint, we do not need a lock to access 1164 // the compile queues. 1165 if (_c2_compile_queue != nullptr) { 1166 _c2_compile_queue->mark_on_stack(); 1167 } 1168 if (_c1_compile_queue != nullptr) { 1169 _c1_compile_queue->mark_on_stack(); 1170 } 1171 } 1172 1173 // ------------------------------------------------------------------ 1174 // CompileBroker::compile_method 1175 // 1176 // Request compilation of a method. 1177 void CompileBroker::compile_method_base(const methodHandle& method, 1178 int osr_bci, 1179 int comp_level, 1180 int hot_count, 1181 CompileTask::CompileReason compile_reason, 1182 bool blocking, 1183 Thread* thread) { 1184 guarantee(!method->is_abstract(), "cannot compile abstract methods"); 1185 assert(method->method_holder()->is_instance_klass(), 1186 "sanity check"); 1187 assert(!method->method_holder()->is_not_initialized(), 1188 "method holder must be initialized"); 1189 assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys"); 1190 1191 if (CIPrintRequests) { 1192 tty->print("request: "); 1193 method->print_short_name(tty); 1194 if (osr_bci != InvocationEntryBci) { 1195 tty->print(" osr_bci: %d", osr_bci); 1196 } 1197 tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count); 1198 if (hot_count > 0) { 1199 tty->print(" hot: yes"); 1200 } 1201 tty->cr(); 1202 } 1203 1204 // A request has been made for compilation. Before we do any 1205 // real work, check to see if the method has been compiled 1206 // in the meantime with a definitive result. 1207 if (compilation_is_complete(method, osr_bci, comp_level)) { 1208 return; 1209 } 1210 1211 #ifndef PRODUCT 1212 if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) { 1213 if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) { 1214 // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI. 
1215 return; 1216 } 1217 } 1218 #endif 1219 1220 // If this method is already in the compile queue, then 1221 // we do not block the current thread. 1222 if (compilation_is_in_queue(method)) { 1223 // We may want to decay our counter a bit here to prevent 1224 // multiple denied requests for compilation. This is an 1225 // open compilation policy issue. Note: The other possibility, 1226 // in the case that this is a blocking compile request, is to have 1227 // all subsequent blocking requesters wait for completion of 1228 // ongoing compiles. Note that in this case we'll need a protocol 1229 // for freeing the associated compile tasks. [Or we could have 1230 // a single static monitor on which all these waiters sleep.] 1231 return; 1232 } 1233 1234 // Tiered policy requires MethodCounters to exist before adding a method to 1235 // the queue. Create if we don't have them yet. 1236 method->get_method_counters(thread); 1237 1238 // Outputs from the following MutexLocker block: 1239 CompileTask* task = nullptr; 1240 CompileQueue* queue = compile_queue(comp_level); 1241 1242 // Acquire our lock. 1243 { 1244 MutexLocker locker(thread, MethodCompileQueue_lock); 1245 1246 // Make sure the method has not slipped into the queues since 1247 // last we checked; note that those checks were "fast bail-outs". 1248 // Here we need to be more careful, see 14012000 below. 1249 if (compilation_is_in_queue(method)) { 1250 return; 1251 } 1252 1253 // We need to check again to see if the compilation has 1254 // completed. A previous compilation may have registered 1255 // some result. 1256 if (compilation_is_complete(method, osr_bci, comp_level)) { 1257 return; 1258 } 1259 1260 // We now know that this compilation is not pending, complete, 1261 // or prohibited. Assign a compile_id to this compilation 1262 // and check to see if it is in our [Start..Stop) range. 1263 int compile_id = assign_compile_id(method, osr_bci); 1264 if (compile_id == 0) { 1265 // The compilation falls outside the allowed range. 1266 return; 1267 } 1268 1269 #if INCLUDE_JVMCI 1270 if (UseJVMCICompiler && blocking) { 1271 // Don't allow blocking compiles for requests triggered by JVMCI. 1272 if (thread->is_Compiler_thread()) { 1273 blocking = false; 1274 } 1275 1276 // In libjvmci, JVMCI initialization should not deadlock with other threads 1277 if (!UseJVMCINativeLibrary) { 1278 // Don't allow blocking compiles if inside a class initializer or while performing class loading 1279 vframeStream vfst(JavaThread::cast(thread)); 1280 for (; !vfst.at_end(); vfst.next()) { 1281 if (vfst.method()->is_static_initializer() || 1282 (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) && 1283 vfst.method()->name() == vmSymbols::loadClass_name())) { 1284 blocking = false; 1285 break; 1286 } 1287 } 1288 1289 // Don't allow blocking compilation requests to JVMCI 1290 // if JVMCI itself is not yet initialized 1291 if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) { 1292 blocking = false; 1293 } 1294 } 1295 1296 // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown 1297 // to avoid deadlock between compiler thread(s) and threads run at shutdown 1298 // such as the DestroyJavaVM thread. 1299 if (JVMCI::in_shutdown()) { 1300 blocking = false; 1301 } 1302 } 1303 #endif // INCLUDE_JVMCI 1304 1305 // We will enter the compilation in the queue. 1306 // 14012000: Note that this sets the queued_for_compile bits in 1307 // the target method. 
We can now reason that a method cannot be
    // queued for compilation more than once, as follows:
    // Before a thread queues a task for compilation, it first acquires
    // the compile queue lock, then checks if the method's queued bits
    // are set or it has already been compiled. Thus there cannot be two
    // instances of a compilation task for the same method on the
    // compilation queue. Consider now the case where the compilation
    // thread has already removed a task for that method from the queue
    // and is in the midst of compiling it. In this case, the
    // queued_for_compile bits must be set in the method (and these
    // will be visible to the current thread, since the bits were set
    // under protection of the compile queue lock, which we hold now).
    // When the compilation completes, the compiler thread first sets
    // the compilation result and then clears the queued_for_compile
    // bits. Neither of these actions is protected by a barrier (or done
    // under the protection of a lock), so the only guarantee we have
    // (on machines with TSO (Total Store Order)) is that these values
    // will update in that order. As a result, the only combinations of
    // these bits that the current thread will see are, in temporal order:
    // <RESULT, QUEUE> :
    //     <0, 1> : in compile queue, but not yet compiled
    //     <1, 1> : compiled but queue bit not cleared
    //     <1, 0> : compiled and queue bit cleared
    // Because we first check the queue bits and then check the result bits,
    // we are assured that we cannot introduce a duplicate task.
    // Note that if we did the tests in the reverse order (i.e. check
    // result then check queued bit), we could get the result bit before
    // the compilation completed, and the queue bit after the compilation
    // completed, and end up introducing a "duplicate" (redundant) task.
    // In that case, the compiler thread should first check if a method
    // has already been compiled before trying to compile it.
    // NOTE: in the event that there are multiple compiler threads and
    // there is de-optimization/recompilation, things will get hairy,
    // and in that case it's best to protect both the testing (here) of
    // these bits, and their updating (here and elsewhere) under a
    // common lock.
    task = create_compile_task(queue,
                               compile_id, method,
                               osr_bci, comp_level,
                               hot_count, compile_reason,
                               blocking);
  }

  if (blocking) {
    wait_for_completion(task);
  }
}

nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                       int comp_level,
                                       int hot_count,
                                       CompileTask::CompileReason compile_reason,
                                       TRAPS) {
  // Do nothing if the CompileBroker is not initialized or if compiles are submitted at level none.
  if (!_initialized || comp_level == CompLevel_none) {
    return nullptr;
  }

  AbstractCompiler *comp = CompileBroker::compiler(comp_level);
  assert(comp != nullptr, "Ensure we have a compiler");

#if INCLUDE_JVMCI
  if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) {
    // JVMCI compilation is not yet initializable.
    return nullptr;
  }
#endif

  DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
  // CompileBroker::compile_method can trap and can have pending async exception.
1377 nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_count, compile_reason, directive, THREAD); 1378 DirectivesStack::release(directive); 1379 return nm; 1380 } 1381 1382 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1383 int comp_level, 1384 int hot_count, 1385 CompileTask::CompileReason compile_reason, 1386 DirectiveSet* directive, 1387 TRAPS) { 1388 1389 // make sure arguments make sense 1390 assert(method->method_holder()->is_instance_klass(), "not an instance method"); 1391 assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); 1392 assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods"); 1393 assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized"); 1394 // return quickly if possible 1395 1396 // lock, make sure that the compilation 1397 // isn't prohibited in a straightforward way. 1398 AbstractCompiler* comp = CompileBroker::compiler(comp_level); 1399 if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) { 1400 return nullptr; 1401 } 1402 1403 if (osr_bci == InvocationEntryBci) { 1404 // standard compilation 1405 nmethod* method_code = method->code(); 1406 if (method_code != nullptr) { 1407 if (compilation_is_complete(method, osr_bci, comp_level)) { 1408 return method_code; 1409 } 1410 } 1411 if (method->is_not_compilable(comp_level)) { 1412 return nullptr; 1413 } 1414 } else { 1415 // osr compilation 1416 // We accept a higher level osr method 1417 nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false); 1418 if (nm != nullptr) return nm; 1419 if (method->is_not_osr_compilable(comp_level)) return nullptr; 1420 } 1421 1422 assert(!HAS_PENDING_EXCEPTION, "No exception should be present"); 1423 // some prerequisites that are compiler specific 1424 if (comp->is_c2() || comp->is_jvmci()) { 1425 InternalOOMEMark iom(THREAD); 1426 method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL); 1427 // Resolve all classes seen in the signature of the method 1428 // we are compiling. 1429 Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL); 1430 } 1431 1432 // If the method is native, do the lookup in the thread requesting 1433 // the compilation. Native lookups can load code, which is not 1434 // permitted during compilation. 1435 // 1436 // Note: A native method implies non-osr compilation which is 1437 // checked with an assertion at the entry of this method. 1438 if (method->is_native() && !method->is_method_handle_intrinsic()) { 1439 address adr = NativeLookup::lookup(method, THREAD); 1440 if (HAS_PENDING_EXCEPTION) { 1441 // In case of an exception looking up the method, we just forget 1442 // about it. The interpreter will kick-in and throw the exception. 1443 method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable() 1444 CLEAR_PENDING_EXCEPTION; 1445 return nullptr; 1446 } 1447 assert(method->has_native_function(), "must have native code by now"); 1448 } 1449 1450 // RedefineClasses() has replaced this method; just return 1451 if (method->is_old()) { 1452 return nullptr; 1453 } 1454 1455 // JVMTI -- post_compile_event requires jmethod_id() that may require 1456 // a lock the compiling thread can not acquire. Prefetch it here. 
  if (JvmtiExport::should_post_compiled_method_load()) {
    method->jmethod_id();
  }

  // do the compilation
  if (method->is_native()) {
    if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
      // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
      // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
      //
      // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
      // in this case. If we can't generate one and use it we cannot execute the out-of-line method handle calls.
      AdapterHandlerLibrary::create_native_wrapper(method);
    } else {
      return nullptr;
    }
  } else {
    // If the compiler is shut off due to the code cache getting full,
    // fail out now so blocking compiles don't hang the Java thread.
    if (!should_compile_new_jobs()) {
      return nullptr;
    }
    bool is_blocking = !directive->BackgroundCompilationOption || ReplayCompiles;
    compile_method_base(method, osr_bci, comp_level, hot_count, compile_reason, is_blocking, THREAD);
  }

  // return requested nmethod
  // We accept a higher level osr method
  if (osr_bci == InvocationEntryBci) {
    return method->code();
  }
  return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}


// ------------------------------------------------------------------
// CompileBroker::compilation_is_complete
//
// See if compilation of this method is already complete.
bool CompileBroker::compilation_is_complete(const methodHandle& method,
                                            int osr_bci,
                                            int comp_level) {
  bool is_osr = (osr_bci != standard_entry_bci);
  if (is_osr) {
    if (method->is_not_osr_compilable(comp_level)) {
      return true;
    } else {
      nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
      return (result != nullptr);
    }
  } else {
    if (method->is_not_compilable(comp_level)) {
      return true;
    } else {
      nmethod* result = method->code();
      if (result == nullptr) return false;
      return comp_level == result->comp_level();
    }
  }
}


/**
 * See if this compilation is already requested.
 *
 * Implementation note: there is only a single "is in queue" bit
 * for each method. This means that the check below is overly
 * conservative in the sense that an OSR compilation in the queue
 * will block a normal compilation from entering the queue (and vice
 * versa). This can be remedied by a full queue search to disambiguate
 * cases. If it is deemed profitable, this may be done.
 */
bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
  return method->queued_for_compilation();
}

// ------------------------------------------------------------------
// CompileBroker::compilation_is_prohibited
//
// See if this compilation is not allowed.
bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
  bool is_native = method->is_native();
  // Some compilers may not support the compilation of natives.
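  // (For example, with -XX:-CICompileNatives, checked just below, native methods
  // are never queued and only get the not-compilable bit set.)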
1540 AbstractCompiler *comp = compiler(comp_level); 1541 if (is_native && (!CICompileNatives || comp == nullptr)) { 1542 method->set_not_compilable_quietly("native methods not supported", comp_level); 1543 return true; 1544 } 1545 1546 bool is_osr = (osr_bci != standard_entry_bci); 1547 // Some compilers may not support on stack replacement. 1548 if (is_osr && (!CICompileOSR || comp == nullptr)) { 1549 method->set_not_osr_compilable("OSR not supported", comp_level); 1550 return true; 1551 } 1552 1553 // The method may be explicitly excluded by the user. 1554 double scale; 1555 if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) { 1556 bool quietly = CompilerOracle::be_quiet(); 1557 if (PrintCompilation && !quietly) { 1558 // This does not happen quietly... 1559 ResourceMark rm; 1560 tty->print("### Excluding %s:%s", 1561 method->is_native() ? "generation of native wrapper" : "compile", 1562 (method->is_static() ? " static" : "")); 1563 method->print_short_name(tty); 1564 tty->cr(); 1565 } 1566 method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly); 1567 } 1568 1569 return false; 1570 } 1571 1572 /** 1573 * Generate serialized IDs for compilation requests. If certain debugging flags are used 1574 * and the ID is not within the specified range, the method is not compiled and 0 is returned. 1575 * The function also allows to generate separate compilation IDs for OSR compilations. 1576 */ 1577 int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) { 1578 #ifdef ASSERT 1579 bool is_osr = (osr_bci != standard_entry_bci); 1580 int id; 1581 if (method->is_native()) { 1582 assert(!is_osr, "can't be osr"); 1583 // Adapters, native wrappers and method handle intrinsics 1584 // should be generated always. 1585 return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1); 1586 } else if (CICountOSR && is_osr) { 1587 id = Atomic::add(&_osr_compilation_id, 1); 1588 if (CIStartOSR <= id && id < CIStopOSR) { 1589 return id; 1590 } 1591 } else { 1592 id = Atomic::add(&_compilation_id, 1); 1593 if (CIStart <= id && id < CIStop) { 1594 return id; 1595 } 1596 } 1597 1598 // Method was not in the appropriate compilation range. 1599 method->set_not_compilable_quietly("Not in requested compile id range"); 1600 return 0; 1601 #else 1602 // CICountOSR is a develop flag and set to 'false' by default. In a product built, 1603 // only _compilation_id is incremented. 1604 return Atomic::add(&_compilation_id, 1); 1605 #endif 1606 } 1607 1608 // ------------------------------------------------------------------ 1609 // CompileBroker::assign_compile_id_unlocked 1610 // 1611 // Public wrapper for assign_compile_id that acquires the needed locks 1612 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) { 1613 MutexLocker locker(thread, MethodCompileQueue_lock); 1614 return assign_compile_id(method, osr_bci); 1615 } 1616 1617 // ------------------------------------------------------------------ 1618 // CompileBroker::create_compile_task 1619 // 1620 // Create a CompileTask object representing the current request for 1621 // compilation. Add this task to the queue. 
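// Note: like assign_compile_id above, this is presumably meant to run with
// MethodCompileQueue_lock held (compare assign_compile_id_unlocked, which acquires it),
// so that id assignment and queue insertion appear as one step to other requesters.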
1622 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue, 1623 int compile_id, 1624 const methodHandle& method, 1625 int osr_bci, 1626 int comp_level, 1627 int hot_count, 1628 CompileTask::CompileReason compile_reason, 1629 bool blocking) { 1630 CompileTask* new_task = CompileTask::allocate(); 1631 new_task->initialize(compile_id, method, osr_bci, comp_level, 1632 hot_count, compile_reason, 1633 blocking); 1634 queue->add(new_task); 1635 return new_task; 1636 } 1637 1638 #if INCLUDE_JVMCI 1639 // The number of milliseconds to wait before checking if 1640 // JVMCI compilation has made progress. 1641 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000; 1642 1643 // The number of JVMCI compilation progress checks that must fail 1644 // before unblocking a thread waiting for a blocking compilation. 1645 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10; 1646 1647 /** 1648 * Waits for a JVMCI compiler to complete a given task. This thread 1649 * waits until either the task completes or it sees no JVMCI compilation 1650 * progress for N consecutive milliseconds where N is 1651 * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * 1652 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS. 1653 * 1654 * @return true if this thread needs to free/recycle the task 1655 */ 1656 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) { 1657 assert(UseJVMCICompiler, "sanity"); 1658 MonitorLocker ml(thread, task->lock()); 1659 int progress_wait_attempts = 0; 1660 jint thread_jvmci_compilation_ticks = 0; 1661 jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks(); 1662 while (!task->is_complete() && !is_compilation_disabled_forever() && 1663 ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) { 1664 JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state(); 1665 1666 bool progress; 1667 if (jvmci_compile_state != nullptr) { 1668 jint ticks = jvmci_compile_state->compilation_ticks(); 1669 progress = (ticks - thread_jvmci_compilation_ticks) != 0; 1670 JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks); 1671 thread_jvmci_compilation_ticks = ticks; 1672 } else { 1673 // Still waiting on JVMCI compiler queue. This thread may be holding a lock 1674 // that all JVMCI compiler threads are blocked on. We use the global JVMCI 1675 // compilation ticks to determine whether JVMCI compilation 1676 // is still making progress through the JVMCI compiler queue. 1677 jint ticks = jvmci->global_compilation_ticks(); 1678 progress = (ticks - global_jvmci_compilation_ticks) != 0; 1679 JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks); 1680 global_jvmci_compilation_ticks = ticks; 1681 } 1682 1683 if (!progress) { 1684 if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) { 1685 if (PrintCompilation) { 1686 task->print(tty, "wait for blocking compilation timed out"); 1687 } 1688 JVMCI_event_1("waiting on compilation %d timed out", task->compile_id()); 1689 break; 1690 } 1691 } else { 1692 progress_wait_attempts = 0; 1693 } 1694 } 1695 task->clear_waiter(); 1696 return task->is_complete(); 1697 } 1698 #endif 1699 1700 /** 1701 * Wait for the compilation task to complete. 
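 *
 * When the timed JVMCI wait above is used, the wait may give up after no progress has
 * been observed for roughly JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE *
 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 1000 ms * 10 = ~10 seconds with the
 * default values above; in that case the task is not complete and this waiter does
 * not free it.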
1702 */ 1703 void CompileBroker::wait_for_completion(CompileTask* task) { 1704 if (CIPrintCompileQueue) { 1705 ttyLocker ttyl; 1706 tty->print_cr("BLOCKING FOR COMPILE"); 1707 } 1708 1709 assert(task->is_blocking(), "can only wait on blocking task"); 1710 1711 JavaThread* thread = JavaThread::current(); 1712 1713 methodHandle method(thread, task->method()); 1714 bool free_task; 1715 #if INCLUDE_JVMCI 1716 AbstractCompiler* comp = compiler(task->comp_level()); 1717 if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) { 1718 // It may return before compilation is completed. 1719 // Note that libjvmci should not pre-emptively unblock 1720 // a thread waiting for a compilation as it does not call 1721 // Java code and so is not deadlock prone like jarjvmci. 1722 free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread); 1723 } else 1724 #endif 1725 { 1726 MonitorLocker ml(thread, task->lock()); 1727 free_task = true; 1728 task->inc_waiting_for_completion(); 1729 while (!task->is_complete() && !is_compilation_disabled_forever()) { 1730 ml.wait(); 1731 } 1732 task->dec_waiting_for_completion(); 1733 } 1734 1735 if (free_task) { 1736 if (is_compilation_disabled_forever()) { 1737 CompileTask::free(task); 1738 return; 1739 } 1740 1741 // It is harmless to check this status without the lock, because 1742 // completion is a stable property (until the task object is recycled). 1743 assert(task->is_complete(), "Compilation should have completed"); 1744 1745 // By convention, the waiter is responsible for recycling a 1746 // blocking CompileTask. Since there is only one waiter ever 1747 // waiting on a CompileTask, we know that no one else will 1748 // be using this CompileTask; we can free it. 1749 CompileTask::free(task); 1750 } 1751 } 1752 1753 /** 1754 * Initialize compiler thread(s) + compiler object(s). The postcondition 1755 * of this function is that the compiler runtimes are initialized and that 1756 * compiler threads can start compiling. 1757 */ 1758 bool CompileBroker::init_compiler_runtime() { 1759 CompilerThread* thread = CompilerThread::current(); 1760 AbstractCompiler* comp = thread->compiler(); 1761 // Final sanity check - the compiler object must exist 1762 guarantee(comp != nullptr, "Compiler object must exist"); 1763 1764 { 1765 // Must switch to native to allocate ci_env 1766 ThreadToNativeFromVM ttn(thread); 1767 ciEnv ci_env((CompileTask*)nullptr); 1768 // Cache Jvmti state 1769 ci_env.cache_jvmti_state(); 1770 // Cache DTrace flags 1771 ci_env.cache_dtrace_flags(); 1772 1773 // Switch back to VM state to do compiler initialization 1774 ThreadInVMfromNative tv(thread); 1775 1776 // Perform per-thread and global initializations 1777 comp->initialize(); 1778 } 1779 1780 if (comp->is_failed()) { 1781 disable_compilation_forever(); 1782 // If compiler initialization failed, no compiler thread that is specific to a 1783 // particular compiler runtime will ever start to compile methods. 
1784 shutdown_compiler_runtime(comp, thread); 1785 return false; 1786 } 1787 1788 // C1 specific check 1789 if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) { 1790 warning("Initialization of %s thread failed (no space to run compilers)", thread->name()); 1791 return false; 1792 } 1793 1794 return true; 1795 } 1796 1797 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { 1798 BufferBlob* blob = thread->get_buffer_blob(); 1799 if (blob != nullptr) { 1800 blob->purge(); 1801 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1802 CodeCache::free(blob); 1803 } 1804 } 1805 1806 /** 1807 * If C1 and/or C2 initialization failed, we shut down all compilation. 1808 * We do this to keep things simple. This can be changed if it ever turns 1809 * out to be a problem. 1810 */ 1811 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { 1812 free_buffer_blob_if_allocated(thread); 1813 1814 if (comp->should_perform_shutdown()) { 1815 // There are two reasons for shutting down the compiler 1816 // 1) compiler runtime initialization failed 1817 // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing 1818 warning("%s initialization failed. Shutting down all compilers", comp->name()); 1819 1820 // Only one thread per compiler runtime object enters here 1821 // Set state to shut down 1822 comp->set_shut_down(); 1823 1824 // Delete all queued compilation tasks to make compiler threads exit faster. 1825 if (_c1_compile_queue != nullptr) { 1826 _c1_compile_queue->free_all(); 1827 } 1828 1829 if (_c2_compile_queue != nullptr) { 1830 _c2_compile_queue->free_all(); 1831 } 1832 1833 // Set flags so that we continue execution with using interpreter only. 1834 UseCompiler = false; 1835 UseInterpreter = true; 1836 1837 // We could delete compiler runtimes also. However, there are references to 1838 // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then 1839 // fail. This can be done later if necessary. 1840 } 1841 } 1842 1843 /** 1844 * Helper function to create new or reuse old CompileLog. 1845 */ 1846 CompileLog* CompileBroker::get_log(CompilerThread* ct) { 1847 if (!LogCompilation) return nullptr; 1848 1849 AbstractCompiler *compiler = ct->compiler(); 1850 bool c1 = compiler->is_c1(); 1851 jobject* compiler_objects = c1 ? _compiler1_objects : _compiler2_objects; 1852 assert(compiler_objects != nullptr, "must be initialized at this point"); 1853 CompileLog** logs = c1 ? _compiler1_logs : _compiler2_logs; 1854 assert(logs != nullptr, "must be initialized at this point"); 1855 int count = c1 ? _c1_count : _c2_count; 1856 1857 // Find Compiler number by its threadObj. 1858 oop compiler_obj = ct->threadObj(); 1859 int compiler_number = 0; 1860 bool found = false; 1861 for (; compiler_number < count; compiler_number++) { 1862 if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) { 1863 found = true; 1864 break; 1865 } 1866 } 1867 assert(found, "Compiler must exist at this point"); 1868 1869 // Determine pointer for this thread's log. 1870 CompileLog** log_ptr = &logs[compiler_number]; 1871 1872 // Return old one if it exists. 1873 CompileLog* log = *log_ptr; 1874 if (log != nullptr) { 1875 ct->init_log(log); 1876 return log; 1877 } 1878 1879 // Create a new one and remember it. 
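  // (For reference: init_compiler_thread_log(), defined below, opens a per-thread file
  //  named "hs_c<thread_id>_pid<pid>.log", preferring the temp directory and falling
  //  back to the current directory if that fails.)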
1880 init_compiler_thread_log(); 1881 log = ct->log(); 1882 *log_ptr = log; 1883 return log; 1884 } 1885 1886 // ------------------------------------------------------------------ 1887 // CompileBroker::compiler_thread_loop 1888 // 1889 // The main loop run by a CompilerThread. 1890 void CompileBroker::compiler_thread_loop() { 1891 CompilerThread* thread = CompilerThread::current(); 1892 CompileQueue* queue = thread->queue(); 1893 // For the thread that initializes the ciObjectFactory 1894 // this resource mark holds all the shared objects 1895 ResourceMark rm; 1896 1897 // First thread to get here will initialize the compiler interface 1898 1899 { 1900 ASSERT_IN_VM; 1901 MutexLocker only_one (thread, CompileThread_lock); 1902 if (!ciObjectFactory::is_initialized()) { 1903 ciObjectFactory::initialize(); 1904 } 1905 } 1906 1907 // Open a log. 1908 CompileLog* log = get_log(thread); 1909 if (log != nullptr) { 1910 log->begin_elem("start_compile_thread name='%s' thread='%zu' process='%d'", 1911 thread->name(), 1912 os::current_thread_id(), 1913 os::current_process_id()); 1914 log->stamp(); 1915 log->end_elem(); 1916 } 1917 1918 // If compiler thread/runtime initialization fails, exit the compiler thread 1919 if (!init_compiler_runtime()) { 1920 return; 1921 } 1922 1923 thread->start_idle_timer(); 1924 1925 // Poll for new compilation tasks as long as the JVM runs. Compilation 1926 // should only be disabled if something went wrong while initializing the 1927 // compiler runtimes. This, in turn, should not happen. The only known case 1928 // when compiler runtime initialization fails is if there is not enough free 1929 // space in the code cache to generate the necessary stubs, etc. 1930 while (!is_compilation_disabled_forever()) { 1931 // We need this HandleMark to avoid leaking VM handles. 1932 HandleMark hm(thread); 1933 1934 CompileTask* task = queue->get(thread); 1935 if (task == nullptr) { 1936 if (UseDynamicNumberOfCompilerThreads) { 1937 // Access compiler_count under lock to enforce consistency. 1938 MutexLocker only_one(CompileThread_lock); 1939 if (can_remove(thread, true)) { 1940 if (trace_compiler_threads()) { 1941 ResourceMark rm; 1942 stringStream msg; 1943 msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time", 1944 thread->name(), thread->idle_time_millis()); 1945 print_compiler_threads(msg); 1946 } 1947 1948 // Notify compiler that the compiler thread is about to stop 1949 thread->compiler()->stopping_compiler_thread(thread); 1950 1951 free_buffer_blob_if_allocated(thread); 1952 return; // Stop this thread. 1953 } 1954 } 1955 } else { 1956 // Assign the task to the current thread. Mark this compilation 1957 // thread as active for the profiler. 1958 // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition 1959 // occurs after fetching the compile task off the queue. 1960 CompileTaskWrapper ctw(task); 1961 methodHandle method(thread, task->method()); 1962 1963 // Never compile a method if breakpoints are present in it 1964 if (method()->number_of_breakpoints() == 0) { 1965 // Compile the method. 
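        // (Compilation can have been disabled in the meantime, e.g. when the code cache
        //  filled up; see handle_full_code_cache() further down.)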
1966 if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { 1967 invoke_compiler_on_method(task); 1968 thread->start_idle_timer(); 1969 } else { 1970 // After compilation is disabled, remove remaining methods from queue 1971 method->clear_queued_for_compilation(); 1972 task->set_failure_reason("compilation is disabled"); 1973 } 1974 } else { 1975 task->set_failure_reason("breakpoints are present"); 1976 } 1977 1978 if (UseDynamicNumberOfCompilerThreads) { 1979 possibly_add_compiler_threads(thread); 1980 assert(!thread->has_pending_exception(), "should have been handled"); 1981 } 1982 } 1983 } 1984 1985 // Shut down compiler runtime 1986 shutdown_compiler_runtime(thread->compiler(), thread); 1987 } 1988 1989 // ------------------------------------------------------------------ 1990 // CompileBroker::init_compiler_thread_log 1991 // 1992 // Set up state required by +LogCompilation. 1993 void CompileBroker::init_compiler_thread_log() { 1994 CompilerThread* thread = CompilerThread::current(); 1995 char file_name[4*K]; 1996 FILE* fp = nullptr; 1997 intx thread_id = os::current_thread_id(); 1998 for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) { 1999 const char* dir = (try_temp_dir ? os::get_temp_directory() : nullptr); 2000 if (dir == nullptr) { 2001 jio_snprintf(file_name, sizeof(file_name), "hs_c%zu_pid%u.log", 2002 thread_id, os::current_process_id()); 2003 } else { 2004 jio_snprintf(file_name, sizeof(file_name), 2005 "%s%shs_c%zu_pid%u.log", dir, 2006 os::file_separator(), thread_id, os::current_process_id()); 2007 } 2008 2009 fp = os::fopen(file_name, "wt"); 2010 if (fp != nullptr) { 2011 if (LogCompilation && Verbose) { 2012 tty->print_cr("Opening compilation log %s", file_name); 2013 } 2014 CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id); 2015 if (log == nullptr) { 2016 fclose(fp); 2017 return; 2018 } 2019 thread->init_log(log); 2020 2021 if (xtty != nullptr) { 2022 ttyLocker ttyl; 2023 // Record any per thread log files 2024 xtty->elem("thread_logfile thread='%zd' filename='%s'", thread_id, file_name); 2025 } 2026 return; 2027 } 2028 } 2029 warning("Cannot open log file: %s", file_name); 2030 } 2031 2032 void CompileBroker::log_metaspace_failure() { 2033 const char* message = "some methods may not be compiled because metaspace " 2034 "is out of memory"; 2035 if (CompilationLog::log() != nullptr) { 2036 CompilationLog::log()->log_metaspace_failure(message); 2037 } 2038 if (PrintCompilation) { 2039 tty->print_cr("COMPILE PROFILING SKIPPED: %s", message); 2040 } 2041 } 2042 2043 2044 // ------------------------------------------------------------------ 2045 // CompileBroker::set_should_block 2046 // 2047 // Set _should_block. 2048 // Call this from the VM, with Threads_lock held and a safepoint requested. 2049 void CompileBroker::set_should_block() { 2050 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); 2051 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already"); 2052 #ifndef PRODUCT 2053 if (PrintCompilation && (Verbose || WizardMode)) 2054 tty->print_cr("notifying compiler thread pool to block"); 2055 #endif 2056 _should_block = true; 2057 } 2058 2059 // ------------------------------------------------------------------ 2060 // CompileBroker::maybe_block 2061 // 2062 // Call this from the compiler at convenient points, to poll for _should_block. 
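// A rough sketch of the intended use, based on set_should_block() above: at VM shutdown
// the VM thread, holding Threads_lock at a safepoint, sets _should_block; compiler
// threads that subsequently poll maybe_block() print their current task (if any) and
// then park in the ThreadInVMfromNative transition below, never to return.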
2063 void CompileBroker::maybe_block() { 2064 if (_should_block) { 2065 #ifndef PRODUCT 2066 if (PrintCompilation && (Verbose || WizardMode)) 2067 tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current())); 2068 #endif 2069 // If we are executing a task during the request to block, report the task 2070 // before disappearing. 2071 CompilerThread* thread = CompilerThread::current(); 2072 if (thread != nullptr) { 2073 CompileTask* task = thread->task(); 2074 if (task != nullptr) { 2075 if (PrintCompilation) { 2076 task->print(tty, "blocked"); 2077 } 2078 task->print_ul("blocked"); 2079 } 2080 } 2081 // Go to VM state and block for final VM shutdown safepoint. 2082 ThreadInVMfromNative tivfn(JavaThread::current()); 2083 assert(false, "Should never unblock from TIVNM entry"); 2084 } 2085 } 2086 2087 // wrapper for CodeCache::print_summary() 2088 static void codecache_print(bool detailed) 2089 { 2090 stringStream s; 2091 // Dump code cache into a buffer before locking the tty, 2092 { 2093 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2094 CodeCache::print_summary(&s, detailed); 2095 } 2096 ttyLocker ttyl; 2097 tty->print("%s", s.freeze()); 2098 } 2099 2100 // wrapper for CodeCache::print_summary() using outputStream 2101 static void codecache_print(outputStream* out, bool detailed) { 2102 stringStream s; 2103 2104 // Dump code cache into a buffer 2105 { 2106 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2107 CodeCache::print_summary(&s, detailed); 2108 } 2109 2110 char* remaining_log = s.as_string(); 2111 while (*remaining_log != '\0') { 2112 char* eol = strchr(remaining_log, '\n'); 2113 if (eol == nullptr) { 2114 out->print_cr("%s", remaining_log); 2115 remaining_log = remaining_log + strlen(remaining_log); 2116 } else { 2117 *eol = '\0'; 2118 out->print_cr("%s", remaining_log); 2119 remaining_log = eol + 1; 2120 } 2121 } 2122 } 2123 2124 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, 2125 int compilable, const char* failure_reason) { 2126 if (!AbortVMOnCompilationFailure) { 2127 return; 2128 } 2129 if (compilable == ciEnv::MethodCompilable_not_at_tier) { 2130 fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); 2131 } 2132 if (compilable == ciEnv::MethodCompilable_never) { 2133 fatal("Never compilable: %s", failure_reason); 2134 } 2135 } 2136 2137 static void post_compilation_event(EventCompilation& event, CompileTask* task) { 2138 assert(task != nullptr, "invariant"); 2139 CompilerEvent::CompilationEvent::post(event, 2140 task->compile_id(), 2141 task->compiler()->type(), 2142 task->method(), 2143 task->comp_level(), 2144 task->is_success(), 2145 task->osr_bci() != CompileBroker::standard_entry_bci, 2146 task->nm_total_size(), 2147 task->num_inlined_bytecodes(), 2148 task->arena_bytes()); 2149 } 2150 2151 int DirectivesStack::_depth = 0; 2152 CompilerDirectives* DirectivesStack::_top = nullptr; 2153 CompilerDirectives* DirectivesStack::_bottom = nullptr; 2154 2155 // Acquires Compilation_lock and waits for it to be notified 2156 // as long as WhiteBox::compilation_locked is true. 2157 static void whitebox_lock_compilation() { 2158 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag); 2159 while (WhiteBox::compilation_locked) { 2160 locker.wait(); 2161 } 2162 } 2163 2164 // ------------------------------------------------------------------ 2165 // CompileBroker::invoke_compiler_on_method 2166 // 2167 // Compile a method. 
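// Rough flow of the implementation below: log/print the task, then either hand the
// method to the JVMCI compiler through a JVMCIEnv, or set up a ciEnv and call the
// C1/C2 compiler's compile_method(); afterwards record any failure reason, post
// DTrace/JFR events, collect statistics and clear the method's queued_for_compilation
// bit.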
2168 // 2169 void CompileBroker::invoke_compiler_on_method(CompileTask* task) { 2170 task->print_ul(); 2171 elapsedTimer time; 2172 2173 DirectiveSet* directive = task->directive(); 2174 if (directive->PrintCompilationOption) { 2175 ResourceMark rm; 2176 task->print_tty(); 2177 } 2178 2179 CompilerThread* thread = CompilerThread::current(); 2180 ResourceMark rm(thread); 2181 2182 if (CompilationLog::log() != nullptr) { 2183 CompilationLog::log()->log_compile(thread, task); 2184 } 2185 2186 // Common flags. 2187 int compile_id = task->compile_id(); 2188 int osr_bci = task->osr_bci(); 2189 bool is_osr = (osr_bci != standard_entry_bci); 2190 bool should_log = (thread->log() != nullptr); 2191 bool should_break = false; 2192 const int task_level = task->comp_level(); 2193 AbstractCompiler* comp = task->compiler(); 2194 { 2195 // create the handle inside it's own block so it can't 2196 // accidentally be referenced once the thread transitions to 2197 // native. The NoHandleMark before the transition should catch 2198 // any cases where this occurs in the future. 2199 methodHandle method(thread, task->method()); 2200 2201 assert(!method->is_native(), "no longer compile natives"); 2202 2203 // Update compile information when using perfdata. 2204 if (UsePerfData) { 2205 update_compile_perf_data(thread, method, is_osr); 2206 } 2207 2208 DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); 2209 } 2210 2211 should_break = directive->BreakAtCompileOption || task->check_break_at_flags(); 2212 if (should_log && !directive->LogOption) { 2213 should_log = false; 2214 } 2215 2216 // Allocate a new set of JNI handles. 2217 JNIHandleMark jhm(thread); 2218 Method* target_handle = task->method(); 2219 int compilable = ciEnv::MethodCompilable; 2220 const char* failure_reason = nullptr; 2221 bool failure_reason_on_C_heap = false; 2222 const char* retry_message = nullptr; 2223 2224 #if INCLUDE_JVMCI 2225 if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) { 2226 JVMCICompiler* jvmci = (JVMCICompiler*) comp; 2227 2228 TraceTime t1("compilation", &time); 2229 EventCompilation event; 2230 JVMCICompileState compile_state(task, jvmci); 2231 JVMCIRuntime *runtime = nullptr; 2232 2233 if (JVMCI::in_shutdown()) { 2234 failure_reason = "in JVMCI shutdown"; 2235 retry_message = "not retryable"; 2236 compilable = ciEnv::MethodCompilable_never; 2237 } else if (compile_state.target_method_is_old()) { 2238 // Skip redefined methods 2239 failure_reason = "redefined method"; 2240 retry_message = "not retryable"; 2241 compilable = ciEnv::MethodCompilable_never; 2242 } else { 2243 JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__); 2244 if (env.init_error() != JNI_OK) { 2245 const char* msg = env.init_error_msg(); 2246 failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)", 2247 env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI); 2248 bool reason_on_C_heap = true; 2249 // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it 2250 // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime. 
2251 bool retryable = env.init_error() == JNI_ENOMEM; 2252 compile_state.set_failure(retryable, failure_reason, reason_on_C_heap); 2253 } 2254 if (failure_reason == nullptr) { 2255 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2256 // Must switch to native to block 2257 ThreadToNativeFromVM ttn(thread); 2258 whitebox_lock_compilation(); 2259 } 2260 methodHandle method(thread, target_handle); 2261 runtime = env.runtime(); 2262 runtime->compile_method(&env, jvmci, method, osr_bci); 2263 2264 failure_reason = compile_state.failure_reason(); 2265 failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); 2266 if (!compile_state.retryable()) { 2267 retry_message = "not retryable"; 2268 compilable = ciEnv::MethodCompilable_not_at_tier; 2269 } 2270 if (!task->is_success()) { 2271 assert(failure_reason != nullptr, "must specify failure_reason"); 2272 } 2273 } 2274 } 2275 if (!task->is_success() && !JVMCI::in_shutdown()) { 2276 handle_compile_error(thread, task, nullptr, compilable, failure_reason); 2277 } 2278 if (event.should_commit()) { 2279 post_compilation_event(event, task); 2280 } 2281 2282 if (runtime != nullptr) { 2283 runtime->post_compile(thread); 2284 } 2285 } else 2286 #endif // INCLUDE_JVMCI 2287 { 2288 NoHandleMark nhm; 2289 ThreadToNativeFromVM ttn(thread); 2290 2291 ciEnv ci_env(task); 2292 if (should_break) { 2293 ci_env.set_break_at_compile(true); 2294 } 2295 if (should_log) { 2296 ci_env.set_log(thread->log()); 2297 } 2298 assert(thread->env() == &ci_env, "set by ci_env"); 2299 // The thread-env() field is cleared in ~CompileTaskWrapper. 2300 2301 // Cache Jvmti state 2302 bool method_is_old = ci_env.cache_jvmti_state(); 2303 2304 // Skip redefined methods 2305 if (method_is_old) { 2306 ci_env.record_method_not_compilable("redefined method", true); 2307 } 2308 2309 // Cache DTrace flags 2310 ci_env.cache_dtrace_flags(); 2311 2312 ciMethod* target = ci_env.get_method_from_handle(target_handle); 2313 2314 TraceTime t1("compilation", &time); 2315 EventCompilation event; 2316 2317 if (comp == nullptr) { 2318 ci_env.record_method_not_compilable("no compiler"); 2319 } else if (!ci_env.failing()) { 2320 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2321 whitebox_lock_compilation(); 2322 } 2323 comp->compile_method(&ci_env, target, osr_bci, true, directive); 2324 2325 /* Repeat compilation without installing code for profiling purposes */ 2326 int repeat_compilation_count = directive->RepeatCompilationOption; 2327 while (repeat_compilation_count > 0) { 2328 ResourceMark rm(thread); 2329 task->print_ul("NO CODE INSTALLED"); 2330 comp->compile_method(&ci_env, target, osr_bci, false, directive); 2331 repeat_compilation_count--; 2332 } 2333 } 2334 2335 2336 if (!ci_env.failing() && !task->is_success()) { 2337 assert(ci_env.failure_reason() != nullptr, "expect failure reason"); 2338 assert(false, "compiler should always document failure: %s", ci_env.failure_reason()); 2339 // The compiler elected, without comment, not to register a result. 2340 // Do not attempt further compilations of this method. 
2341 ci_env.record_method_not_compilable("compile failed"); 2342 } 2343 2344 // Copy this bit to the enclosing block: 2345 compilable = ci_env.compilable(); 2346 2347 if (ci_env.failing()) { 2348 // Duplicate the failure reason string, so that it outlives ciEnv 2349 failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler); 2350 failure_reason_on_C_heap = true; 2351 retry_message = ci_env.retry_message(); 2352 ci_env.report_failure(failure_reason); 2353 } 2354 2355 if (ci_env.failing()) { 2356 handle_compile_error(thread, task, &ci_env, compilable, failure_reason); 2357 } 2358 if (event.should_commit()) { 2359 post_compilation_event(event, task); 2360 } 2361 } 2362 2363 if (failure_reason != nullptr) { 2364 task->set_failure_reason(failure_reason, failure_reason_on_C_heap); 2365 if (CompilationLog::log() != nullptr) { 2366 CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message); 2367 } 2368 if (PrintCompilation || directive->PrintCompilationOption) { 2369 FormatBufferResource msg = retry_message != nullptr ? 2370 FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) : 2371 FormatBufferResource("COMPILE SKIPPED: %s", failure_reason); 2372 task->print(tty, msg); 2373 } 2374 } 2375 2376 DirectivesStack::release(directive); 2377 2378 methodHandle method(thread, task->method()); 2379 2380 DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); 2381 2382 collect_statistics(thread, time, task); 2383 2384 if (PrintCompilation && PrintCompilation2) { 2385 tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp 2386 tty->print("%4d ", compile_id); // print compilation number 2387 tty->print("%s ", (is_osr ? "%" : " ")); 2388 if (task->is_success()) { 2389 tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size()); 2390 } 2391 tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); 2392 } 2393 2394 Log(compilation, codecache) log; 2395 if (log.is_debug()) { 2396 LogStream ls(log.debug()); 2397 codecache_print(&ls, /* detailed= */ false); 2398 } 2399 if (PrintCodeCacheOnCompilation) { 2400 codecache_print(/* detailed= */ false); 2401 } 2402 // Disable compilation, if required. 2403 switch (compilable) { 2404 case ciEnv::MethodCompilable_never: 2405 if (is_osr) 2406 method->set_not_osr_compilable_quietly("MethodCompilable_never"); 2407 else 2408 method->set_not_compilable_quietly("MethodCompilable_never"); 2409 break; 2410 case ciEnv::MethodCompilable_not_at_tier: 2411 if (is_osr) 2412 method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2413 else 2414 method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2415 break; 2416 } 2417 2418 // Note that the queued_for_compilation bits are cleared without 2419 // protection of a mutex. [They were set by the requester thread, 2420 // when adding the task to the compile queue -- at which time the 2421 // compile queue lock was held. Subsequently, we acquired the compile 2422 // queue lock to get this task off the compile queue; thus (to belabour 2423 // the point somewhat) our clearing of the bits must be occurring 2424 // only after the setting of the bits. See also 14012000 above. 2425 method->clear_queued_for_compilation(); 2426 } 2427 2428 /** 2429 * The CodeCache is full. Print warning and disable compilation. 2430 * Schedule code cache cleaning so compilation can continue later. 
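 * With -XX:+UseCodeCacheFlushing (the default) new compiles are merely stopped
 * (set_should_compile_new_jobs(stop_compilation)) and may resume once space has been
 * reclaimed; with -XX:-UseCodeCacheFlushing compilation is disabled forever.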
2431 * This function needs to be called only from CodeCache::allocate(), 2432 * since we currently handle a full code cache uniformly. 2433 */ 2434 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) { 2435 UseInterpreter = true; 2436 if (UseCompiler || AlwaysCompileLoopMethods ) { 2437 if (xtty != nullptr) { 2438 stringStream s; 2439 // Dump code cache state into a buffer before locking the tty, 2440 // because log_state() will use locks causing lock conflicts. 2441 CodeCache::log_state(&s); 2442 // Lock to prevent tearing 2443 ttyLocker ttyl; 2444 xtty->begin_elem("code_cache_full"); 2445 xtty->print("%s", s.freeze()); 2446 xtty->stamp(); 2447 xtty->end_elem(); 2448 } 2449 2450 #ifndef PRODUCT 2451 if (ExitOnFullCodeCache) { 2452 codecache_print(/* detailed= */ true); 2453 before_exit(JavaThread::current()); 2454 exit_globals(); // will delete tty 2455 vm_direct_exit(1); 2456 } 2457 #endif 2458 if (UseCodeCacheFlushing) { 2459 // Since code cache is full, immediately stop new compiles 2460 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { 2461 log_info(codecache)("Code cache is full - disabling compilation"); 2462 } 2463 } else { 2464 disable_compilation_forever(); 2465 } 2466 2467 CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning()); 2468 } 2469 } 2470 2471 // ------------------------------------------------------------------ 2472 // CompileBroker::update_compile_perf_data 2473 // 2474 // Record this compilation for debugging purposes. 2475 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) { 2476 ResourceMark rm; 2477 char* method_name = method->name()->as_C_string(); 2478 char current_method[CompilerCounters::cmname_buffer_length]; 2479 size_t maxLen = CompilerCounters::cmname_buffer_length; 2480 2481 const char* class_name = method->method_holder()->name()->as_C_string(); 2482 2483 size_t s1len = strlen(class_name); 2484 size_t s2len = strlen(method_name); 2485 2486 // check if we need to truncate the string 2487 if (s1len + s2len + 2 > maxLen) { 2488 2489 // the strategy is to lop off the leading characters of the 2490 // class name and the trailing characters of the method name. 2491 2492 if (s2len + 2 > maxLen) { 2493 // lop of the entire class name string, let snprintf handle 2494 // truncation of the method name. 2495 class_name += s1len; // null string 2496 } 2497 else { 2498 // lop off the extra characters from the front of the class name 2499 class_name += ((s1len + s2len + 2) - maxLen); 2500 } 2501 } 2502 2503 jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name); 2504 2505 int last_compile_type = normal_compile; 2506 if (CICountOSR && is_osr) { 2507 last_compile_type = osr_compile; 2508 } else if (CICountNative && method->is_native()) { 2509 last_compile_type = native_compile; 2510 } 2511 2512 CompilerCounters* counters = thread->counters(); 2513 counters->set_current_method(current_method); 2514 counters->set_compile_type((jlong) last_compile_type); 2515 } 2516 2517 // ------------------------------------------------------------------ 2518 // CompileBroker::collect_statistics 2519 // 2520 // Collect statistics about the compilation. 
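// The counters below are updated under CompileStatistics_lock; the _perf_* counters are
// production performance counters, updated independently of CITime/CITimeEach, and the
// total compilation times also feed JFR CompilerStatistics and
// java.lang.management.CompilationMXBean (see the comments in the body).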
2521 2522 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) { 2523 bool success = task->is_success(); 2524 methodHandle method (thread, task->method()); 2525 int compile_id = task->compile_id(); 2526 bool is_osr = (task->osr_bci() != standard_entry_bci); 2527 const int comp_level = task->comp_level(); 2528 CompilerCounters* counters = thread->counters(); 2529 2530 MutexLocker locker(CompileStatistics_lock); 2531 2532 // _perf variables are production performance counters which are 2533 // updated regardless of the setting of the CITime and CITimeEach flags 2534 // 2535 2536 // account all time, including bailouts and failures in this counter; 2537 // C1 and C2 counters are counting both successful and unsuccessful compiles 2538 _t_total_compilation.add(time); 2539 2540 // Update compilation times. Used by the implementation of JFR CompilerStatistics 2541 // and java.lang.management.CompilationMXBean. 2542 _perf_total_compilation->inc(time.ticks()); 2543 _peak_compilation_time = MAX2(time.milliseconds(), _peak_compilation_time); 2544 2545 if (!success) { 2546 _total_bailout_count++; 2547 if (UsePerfData) { 2548 _perf_last_failed_method->set_value(counters->current_method()); 2549 _perf_last_failed_type->set_value(counters->compile_type()); 2550 _perf_total_bailout_count->inc(); 2551 } 2552 _t_bailedout_compilation.add(time); 2553 } else if (!task->is_success()) { 2554 if (UsePerfData) { 2555 _perf_last_invalidated_method->set_value(counters->current_method()); 2556 _perf_last_invalidated_type->set_value(counters->compile_type()); 2557 _perf_total_invalidated_count->inc(); 2558 } 2559 _total_invalidated_count++; 2560 _t_invalidated_compilation.add(time); 2561 } else { 2562 // Compilation succeeded 2563 if (CITime) { 2564 int bytes_compiled = method->code_size() + task->num_inlined_bytecodes(); 2565 if (is_osr) { 2566 _t_osr_compilation.add(time); 2567 _sum_osr_bytes_compiled += bytes_compiled; 2568 } else { 2569 _t_standard_compilation.add(time); 2570 _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); 2571 } 2572 2573 // Collect statistic per compilation level 2574 if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) { 2575 CompilerStatistics* stats = &_stats_per_level[comp_level-1]; 2576 if (is_osr) { 2577 stats->_osr.update(time, bytes_compiled); 2578 } else { 2579 stats->_standard.update(time, bytes_compiled); 2580 } 2581 stats->_nmethods_size += task->nm_total_size(); 2582 stats->_nmethods_code_size += task->nm_insts_size(); 2583 } else { 2584 assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); 2585 } 2586 2587 // Collect statistic per compiler 2588 AbstractCompiler* comp = compiler(comp_level); 2589 if (comp) { 2590 CompilerStatistics* stats = comp->stats(); 2591 if (is_osr) { 2592 stats->_osr.update(time, bytes_compiled); 2593 } else { 2594 stats->_standard.update(time, bytes_compiled); 2595 } 2596 stats->_nmethods_size += task->nm_total_size(); 2597 stats->_nmethods_code_size += task->nm_insts_size(); 2598 } else { // if (!comp) 2599 assert(false, "Compiler object must exist"); 2600 } 2601 } 2602 2603 if (UsePerfData) { 2604 // save the name of the last method compiled 2605 _perf_last_method->set_value(counters->current_method()); 2606 _perf_last_compile_type->set_value(counters->compile_type()); 2607 _perf_last_compile_size->set_value(method->code_size() + 2608 task->num_inlined_bytecodes()); 2609 if (is_osr) { 2610 
_perf_osr_compilation->inc(time.ticks()); 2611 _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2612 } else { 2613 _perf_standard_compilation->inc(time.ticks()); 2614 _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2615 } 2616 } 2617 2618 if (CITimeEach) { 2619 double compile_time = time.seconds(); 2620 double bytes_per_sec = compile_time == 0.0 ? 0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time; 2621 tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)", 2622 compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); 2623 } 2624 2625 // Collect counts of successful compilations 2626 _sum_nmethod_size += task->nm_total_size(); 2627 _sum_nmethod_code_size += task->nm_insts_size(); 2628 _total_compile_count++; 2629 2630 if (UsePerfData) { 2631 _perf_sum_nmethod_size->inc( task->nm_total_size()); 2632 _perf_sum_nmethod_code_size->inc(task->nm_insts_size()); 2633 _perf_total_compile_count->inc(); 2634 } 2635 2636 if (is_osr) { 2637 if (UsePerfData) _perf_total_osr_compile_count->inc(); 2638 _total_osr_compile_count++; 2639 } else { 2640 if (UsePerfData) _perf_total_standard_compile_count->inc(); 2641 _total_standard_compile_count++; 2642 } 2643 } 2644 // set the current method for the thread to null 2645 if (UsePerfData) counters->set_current_method(""); 2646 } 2647 2648 const char* CompileBroker::compiler_name(int comp_level) { 2649 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 2650 if (comp == nullptr) { 2651 return "no compiler"; 2652 } else { 2653 return (comp->name()); 2654 } 2655 } 2656 2657 jlong CompileBroker::total_compilation_ticks() { 2658 return _perf_total_compilation != nullptr ? 
_perf_total_compilation->get_value() : 0; 2659 } 2660 2661 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) { 2662 tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}", 2663 name, stats->bytes_per_second(), 2664 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, 2665 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, 2666 stats->_nmethods_size, stats->_nmethods_code_size); 2667 } 2668 2669 void CompileBroker::print_times(bool per_compiler, bool aggregate) { 2670 if (per_compiler) { 2671 if (aggregate) { 2672 tty->cr(); 2673 tty->print_cr("Individual compiler times (for compiled methods only)"); 2674 tty->print_cr("------------------------------------------------"); 2675 tty->cr(); 2676 } 2677 for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { 2678 AbstractCompiler* comp = _compilers[i]; 2679 if (comp != nullptr) { 2680 print_times(comp->name(), comp->stats()); 2681 } 2682 } 2683 if (aggregate) { 2684 tty->cr(); 2685 tty->print_cr("Individual compilation Tier times (for compiled methods only)"); 2686 tty->print_cr("------------------------------------------------"); 2687 tty->cr(); 2688 } 2689 char tier_name[256]; 2690 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 2691 CompilerStatistics* stats = &_stats_per_level[tier-1]; 2692 os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier); 2693 print_times(tier_name, stats); 2694 } 2695 } 2696 2697 if (!aggregate) { 2698 return; 2699 } 2700 2701 elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation; 2702 elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation; 2703 elapsedTimer total_compilation = CompileBroker::_t_total_compilation; 2704 2705 uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled; 2706 uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled; 2707 2708 uint standard_compile_count = CompileBroker::_total_standard_compile_count; 2709 uint osr_compile_count = CompileBroker::_total_osr_compile_count; 2710 uint total_compile_count = CompileBroker::_total_compile_count; 2711 uint total_bailout_count = CompileBroker::_total_bailout_count; 2712 uint total_invalidated_count = CompileBroker::_total_invalidated_count; 2713 2714 uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size; 2715 uint nmethods_size = CompileBroker::_sum_nmethod_size; 2716 2717 tty->cr(); 2718 tty->print_cr("Accumulated compiler times"); 2719 tty->print_cr("----------------------------------------------------------"); 2720 //0000000000111111111122222222223333333333444444444455555555556666666666 2721 //0123456789012345678901234567890123456789012345678901234567890123456789 2722 tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds()); 2723 tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s", 2724 standard_compilation.seconds(), 2725 standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count); 2726 tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s", 2727 CompileBroker::_t_bailedout_compilation.seconds(), 2728 total_bailout_count == 0 ? 
0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count); 2729 tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s", 2730 osr_compilation.seconds(), 2731 osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count); 2732 tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s", 2733 CompileBroker::_t_invalidated_compilation.seconds(), 2734 total_invalidated_count == 0 ? 0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count); 2735 2736 AbstractCompiler *comp = compiler(CompLevel_simple); 2737 if (comp != nullptr) { 2738 tty->cr(); 2739 comp->print_timers(); 2740 } 2741 comp = compiler(CompLevel_full_optimization); 2742 if (comp != nullptr) { 2743 tty->cr(); 2744 comp->print_timers(); 2745 } 2746 #if INCLUDE_JVMCI 2747 if (EnableJVMCI) { 2748 JVMCICompiler *jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null()); 2749 if (jvmci_comp != nullptr && jvmci_comp != comp) { 2750 tty->cr(); 2751 jvmci_comp->print_timers(); 2752 } 2753 } 2754 #endif 2755 2756 tty->cr(); 2757 tty->print_cr(" Total compiled methods : %8u methods", total_compile_count); 2758 tty->print_cr(" Standard compilation : %8u methods", standard_compile_count); 2759 tty->print_cr(" On stack replacement : %8u methods", osr_compile_count); 2760 uint tcb = osr_bytes_compiled + standard_bytes_compiled; 2761 tty->print_cr(" Total compiled bytecodes : %8u bytes", tcb); 2762 tty->print_cr(" Standard compilation : %8u bytes", standard_bytes_compiled); 2763 tty->print_cr(" On stack replacement : %8u bytes", osr_bytes_compiled); 2764 double tcs = total_compilation.seconds(); 2765 uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs); 2766 tty->print_cr(" Average compilation speed : %8u bytes/s", bps); 2767 tty->cr(); 2768 tty->print_cr(" nmethod code size : %8u bytes", nmethods_code_size); 2769 tty->print_cr(" nmethod total size : %8u bytes", nmethods_size); 2770 } 2771 2772 // Print general/accumulated JIT information. 2773 void CompileBroker::print_info(outputStream *out) { 2774 if (out == nullptr) out = tty; 2775 out->cr(); 2776 out->print_cr("======================"); 2777 out->print_cr(" General JIT info "); 2778 out->print_cr("======================"); 2779 out->cr(); 2780 out->print_cr(" JIT is : %7s", should_compile_new_jobs() ? "on" : "off"); 2781 out->print_cr(" Compiler threads : %7d", (int)CICompilerCount); 2782 out->cr(); 2783 out->print_cr("CodeCache overview"); 2784 out->print_cr("--------------------------------------------------------"); 2785 out->cr(); 2786 out->print_cr(" Reserved size : %7zu KB", CodeCache::max_capacity() / K); 2787 out->print_cr(" Committed size : %7zu KB", CodeCache::capacity() / K); 2788 out->print_cr(" Unallocated capacity : %7zu KB", CodeCache::unallocated_capacity() / K); 2789 out->cr(); 2790 } 2791 2792 // Note: tty_lock must not be held upon entry to this function. 2793 // Print functions called from herein do "micro-locking" on tty_lock. 2794 // That's a tradeoff which keeps together important blocks of output. 2795 // At the same time, continuous tty_lock hold time is kept in check, 2796 // preventing concurrently printing threads from stalling a long time. 
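// The 'function' argument selects what gets printed: "all", "aggregate" (or "analyze"),
// "UsedSpace", "FreeSpace", "MethodCount", "MethodSpace", "MethodAge", "MethodNames" or
// "discard"; anything else prints an error (see the checks at the top of the body).
// This is the backend of the CodeHeap analytics diagnostic command; an invocation could
// look roughly like (assumed jcmd syntax):
//   jcmd <pid> Compiler.CodeHeap_Analytics aggregate 4096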
2797 void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) { 2798 TimeStamp ts_total; 2799 TimeStamp ts_global; 2800 TimeStamp ts; 2801 2802 bool allFun = !strcmp(function, "all"); 2803 bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun; 2804 bool usedSpace = !strcmp(function, "UsedSpace") || allFun; 2805 bool freeSpace = !strcmp(function, "FreeSpace") || allFun; 2806 bool methodCount = !strcmp(function, "MethodCount") || allFun; 2807 bool methodSpace = !strcmp(function, "MethodSpace") || allFun; 2808 bool methodAge = !strcmp(function, "MethodAge") || allFun; 2809 bool methodNames = !strcmp(function, "MethodNames") || allFun; 2810 bool discard = !strcmp(function, "discard") || allFun; 2811 2812 if (out == nullptr) { 2813 out = tty; 2814 } 2815 2816 if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) { 2817 out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function); 2818 out->cr(); 2819 return; 2820 } 2821 2822 ts_total.update(); // record starting point 2823 2824 if (aggregate) { 2825 print_info(out); 2826 } 2827 2828 // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function. 2829 // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap. 2830 // When we request individual parts of the analysis via the jcmd interface, it is possible 2831 // that in between another thread (another jcmd user or the vm running into CodeCache OOM) 2832 // updated the aggregated data. We will then see a modified, but again consistent, view 2833 // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold 2834 // a lock across user interaction. 2835 2836 // We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock. 2837 // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time, 2838 // leading to an unnecessarily long hold time of the other locks we acquired before. 2839 ts.update(); // record starting point 2840 MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag); 2841 out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds()); 2842 2843 // Holding the CodeCache_lock protects from concurrent alterations of the CodeCache. 2844 // Unfortunately, such protection is not sufficient: 2845 // When a new nmethod is created via ciEnv::register_method(), the 2846 // Compile_lock is taken first. After some initializations, 2847 // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock 2848 // immediately (after finalizing the oop references). To lock out concurrent 2849 // modifiers, we have to grab both locks as well in the described sequence. 2850 // 2851 // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock 2852 // for the entire duration of aggregation and printing. That makes sure we see 2853 // a consistent picture and do not run into issues caused by concurrent alterations. 
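  // In short, as encoded in the flags below: for function "all" Compile_lock and
  // CodeCache_lock are taken once, globally, for the whole aggregation-and-print pass;
  // for a single function they are taken only around the aggregation step; and they are
  // skipped when we are at a safepoint or already own them.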
2854 bool should_take_Compile_lock = !SafepointSynchronize::is_at_safepoint() && 2855 !Compile_lock->owned_by_self(); 2856 bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() && 2857 !CodeCache_lock->owned_by_self(); 2858 bool take_global_lock_1 = allFun && should_take_Compile_lock; 2859 bool take_global_lock_2 = allFun && should_take_CodeCache_lock; 2860 bool take_function_lock_1 = !allFun && should_take_Compile_lock; 2861 bool take_function_lock_2 = !allFun && should_take_CodeCache_lock; 2862 bool take_global_locks = take_global_lock_1 || take_global_lock_2; 2863 bool take_function_locks = take_function_lock_1 || take_function_lock_2; 2864 2865 ts_global.update(); // record starting point 2866 2867 ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag); 2868 ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag); 2869 if (take_global_locks) { 2870 out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds()); 2871 ts_global.update(); // record starting point 2872 } 2873 2874 if (aggregate) { 2875 ts.update(); // record starting point 2876 ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag); 2877 ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag); 2878 if (take_function_locks) { 2879 out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds()); 2880 } 2881 2882 ts.update(); // record starting point 2883 CodeCache::aggregate(out, granularity); 2884 if (take_function_locks) { 2885 out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds()); 2886 } 2887 } 2888 2889 if (usedSpace) CodeCache::print_usedSpace(out); 2890 if (freeSpace) CodeCache::print_freeSpace(out); 2891 if (methodCount) CodeCache::print_count(out); 2892 if (methodSpace) CodeCache::print_space(out); 2893 if (methodAge) CodeCache::print_age(out); 2894 if (methodNames) { 2895 if (allFun) { 2896 // print_names() can only be used safely if the locks have been continuously held 2897 // since aggregation begin. That is true only for function "all". 2898 CodeCache::print_names(out); 2899 } else { 2900 out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'"); 2901 } 2902 } 2903 if (discard) CodeCache::discard(out); 2904 2905 if (take_global_locks) { 2906 out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds()); 2907 } 2908 out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds()); 2909 }